Lare3d/000755 000765 000024 00000000000 11430543216 012263 5ustar00Tonystaff000000 000000 Lare3d/._.DS_Store000644 000765 000024 00000000122 11430543022 014151 0ustar00Tonystaff000000 000000 Mac OS X  2 R@Lare3d/.DS_Store000644 000765 000024 00000014004 11430543022 013740 0ustar00Tonystaff000000 000000 Bud1%  @€ @€ @€ @ E%DSDB`€ @€ @€ @Lare3d/Data/000755 000765 000024 00000000000 11430543226 013135 5ustar00Tonystaff000000 000000 Lare3d/._Makefile000644 000765 000024 00000000272 11430524464 014145 0ustar00Tonystaff000000 000000 Mac OS X  2ˆºTxMtATTR;…#º˜"˜"com.macromates.caret{ column = 0; line = 19; }Lare3d/Makefile000644 000765 000024 00000007543 11430524464 013740 0ustar00Tonystaff000000 000000 # Set the compiler flags #FFLAGS = -fast#-fast #-arch pn4 -tpp7 -tune pn4 -ax #FFLAGS = -r8 -fast -fastsse -O3 -Mipa=fast -Minline -Munroll #PGI optimised #FFLAGS = -Mbounds -g #PGI Debug #FFLAGS = -O3 -fast #Intel #FFLAGS = -fpe0 -nothreads -traceback -fltconsistency -CB -g #Intel Debug FFLAGS = -O3 -fast # Set some of the build parameters TARGET = lare3d #Uncomment the following line to use Qmono viscosity #QMONO = -DQ_MONO #Uncomment the following line to run in single precision #QSINGLE = -DQ_SINGLE #Uncomment the following line to use first order scheme for resistive update QFIRSTORDER = -DQ_FIRSTORDER # -------------------------------------------------- # Shouldn't need to touch below here # -------------------------------------------------- SRCDIR = src OBJDIR = obj BINDIR = bin MODULEFLAG = -module OPFLAGS = $(QMONO) $(QSINGLE) $(QFIRSTORDER) FC = mpif90 $(OPFLAGS) PREPROFLAGS = $(NONMPIIO) OBJFILES = shared_data.o mpi_routines.o openboundary.o mpiboundary.o boundary.o normalise.o conduct.o diagnostics.o setup.o lagran.o \ remap.o xremap.o yremap.o zremap.o initial_conditions.o\ output_cartesian.o iocontrol.o output.o iocommon.o input.o inputfunctions.o\ input_cartesian.o eos.o neutral.o control.o\ welcome.o lare3d.o FULLTARGET = $(BINDIR)/$(TARGET) #vpath %.f90 
$(SRCDIR) #vpath %.o $(OBJDIR) VPATH = $(SRCDIR):$(OBJDIR):$(SRCDIR)/core:$(SRCDIR)/io/ # Rule to build the fortran files %.o: %.f90 @mkdir -p $(BINDIR) $(OBJDIR) $(FC) -c $(FFLAGS) $(MODULEFLAG) $(OBJDIR) -o $(OBJDIR)/$@ $< %.o: %.F90 @mkdir -p $(BINDIR) $(OBJDIR) $(FC) -c $(FFLAGS) $(MODULEFLAG) $(OBJDIR) -o $(OBJDIR)/$@ $(PREPROFLAGS) $< $(FULLTARGET): $(OBJFILES) $(FC) $(FFLAGS) $(MODULEFLAG) $(OBJDIR) -o $@ $(addprefix $(OBJDIR)/,$(OBJFILES)) .PHONEY: clean clean: @rm -rf *~ $(BINDIR) $(OBJDIR) *.pbs.* *.sh.* $(SRCDIR)/*~ $(SRCDIR)/core/*~ $(SRCDIR)/io/*~ *.log .PHONEY: tidy tidy: @rm -rf $(OBJDIR) *.pbs.* *.sh.* $(SRCDIR)/*~ *.log .PHONEY: touch touch: @touch src/* ; touch src/core/* .PHONEY: datatidy datatidy: @rm -rf Data/* .PHONEY: visit visit: @cd VisIT;xml2makefile -clobber cfd.xml;make .PHONEY: visitclean visitclean: @cd VisIT;make clean;rm -f .depend # All the dependencies shared_data.o:shared_data.F90 mpi_routines.o:mpi_routines.f90 shared_data.o normalise.o:normalise.f90 shared_data.o setup.o:setup.F90 shared_data.o normalise.o iocommon.o iocontrol.o input.o input_cartesian.o mpiboundary.o: mpiboundary.f90 shared_data.o openboundary.o: openboundary.f90 shared_data.o boundary.o:boundary.f90 shared_data.o mpiboundary.o xremap.o:xremap.f90 shared_data.o boundary.o yremap.o:yremap.f90 shared_data.o boundary.o zremap.o:zremap.f90 shared_data.o boundary.o diagnostics.o:diagnostics.F90 shared_data.o boundary.o normalise.o output_cartesian.o output.o iocontrol.o eos.o iocommon.o:iocommon.f90 shared_data.o output.o:output.f90 shared_data.o iocommon.o output_cartesian.o: output_cartesian.f90 shared_data.o iocommon.o output.o iocontrol.o: iocontrol.f90 shared_data.o iocommon.o output.o input.o input.o: input.f90 shared_data.o iocommon.o inputfunctions.o inputfunctions.o: inputfunctions.f90 shared_data.o iocommon.o input_cartesian.o: input_cartesian.f90 iocommon.o inputfunctions.o conduct.o:conduct.f90 shared_data.o boundary.o eos.o lagran.o:lagran.F90 
shared_data.o boundary.o diagnostics.o normalise.o eos.o neutral.o conduct.o remap.o:remap.f90 shared_data.o xremap.o yremap.o zremap.o initial_conditions.o:initial_conditions.f90 shared_data.o normalise.o eos.o neutral.o eos.o:eos.F90 shared_data.o normalise.o neutral.o: neutral.f90 shared_data.o boundary.o normalise.o eos.o control.o: control.f90 shared_data.o normalise.o welcome.o: welcome.f90 shared_data.o lare3d.o:lare3d.f90 shared_data.o setup.o boundary.o diagnostics.o lagran.o remap.o mpi_routines.o welcome.o initial_conditions.o openboundary.o eos.o control.o Lare3d/._src000755 000765 000024 00000000426 11430520723 013215 0ustar00Tonystaff000000 000000 Mac OS X  2äATTR;…$¨n¨n%com.apple.metadata:kMDItemWhereFromsbplist00¡_?sftp://francesca.csc.warwick.ac.uk//gpfs/space/phsgi/Lare3d/src LLare3d/src/000755 000765 000024 00000000000 11430520723 013050 5ustar00Tonystaff000000 000000 Lare3d/src/boundary.f90000644 000765 000024 00000025630 11406701277 015231 0ustar00Tonystaff000000 000000 !** ! All the ghost cell values are controlled by these routines. ! To speed things up it may be worth having this routine hard coded ! for each particular run, i.e. remove all if statements. !** MODULE boundary USE shared_data USE mpiboundary IMPLICIT NONE INTEGER :: ndx, ndy, ndz CONTAINS SUBROUTINE set_boundary_conditions REAL(num) :: a LOGICAL :: first_call = .TRUE. IF (first_call) THEN any_open = .FALSE. first_call = .FALSE. IF ((xbc_right == BC_OPEN) .OR. (xbc_left == BC_OPEN) & .OR. (ybc_up == BC_OPEN) .OR. (ybc_down == BC_OPEN) & .OR. (zbc_front == BC_OPEN) .OR. (zbc_back == BC_OPEN)) & any_open = .TRUE. ELSE ! when bzone = 0 uses first order remap scheme so that farfield not ! used in remap bzone = 1.0_num IF (xbc_right == BC_OPEN .AND. right == MPI_PROC_NULL) & bzone(nx-4:nx+2, :, :) = 0.0_num IF (xbc_left == BC_OPEN .AND. left == MPI_PROC_NULL) & bzone(-1:4, :, :) = 0.0_num IF (ybc_up == BC_OPEN .AND. 
up == MPI_PROC_NULL) & bzone(:, ny-4:ny+2, :) = 0.0_num IF (ybc_down == BC_OPEN .AND. down == MPI_PROC_NULL) & bzone(:, -1:4, :) = 0.0_num IF (zbc_back == BC_OPEN .AND. back == MPI_PROC_NULL) & bzone(:, :, nz-4:nz+1) = 0.0_num IF (zbc_front == BC_OPEN .AND. front == MPI_PROC_NULL) & bzone(:, :, -1:4) = 0.0_num ! define region for linear damping of solution at boundaries ! note if ndx > nx damping only in outer process a = length_x / 10.0_num DO ix = 0, nx ndx = ix IF ((xb(ix) - xb(0)) > a) EXIT END DO a = length_y / 10.0_num DO iy = 0, ny ndy = iy IF ((yb(iy) - yb(0)) > a) EXIT END DO a = length_z / 10.0_num DO iz = 0, nz ndz = iz IF ((zb(iz) - zb(0)) > a) EXIT END DO END IF END SUBROUTINE set_boundary_conditions SUBROUTINE boundary_conditions CALL damp_boundaries CALL bfield_bcs CALL energy_bcs CALL density_bcs CALL velocity_bcs END SUBROUTINE boundary_conditions SUBROUTINE damp_boundaries REAL(num) :: a IF (damping) THEN IF (right == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO iy = -2, ny+2 DO ix = nx-ndx, nx+2 a = dt * REAL(ix - (nx-ndx), num) / REAL(ndx, num) vx(ix, iy, iz) = vx(ix, iy, iz) / (1.0_num + a) vy(ix, iy, iz) = vy(ix, iy, iz) / (1.0_num + a) vz(ix, iy, iz) = vz(ix, iy, iz) / (1.0_num + a) END DO END DO END DO END IF IF (left == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO iy = -2, ny+2 DO ix = -2, ndx a = dt * REAL((ndx - ix), num) / REAL(ndx, num) vx(ix, iy, iz) = vx(ix, iy, iz) / (1.0_num + a) vy(ix, iy, iz) = vy(ix, iy, iz) / (1.0_num + a) vz(ix, iy, iz) = vz(ix, iy, iz) / (1.0_num + a) END DO END DO END DO END IF IF (up == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO iy = ny-ndy, ny+2 DO ix = -2, nx+2 a = dt * REAL(iy - (ny-ndy), num) / REAL(ndy, num) vx(ix, iy, iz) = vx(ix, iy, iz) / (1.0_num + a) vy(ix, iy, iz) = vy(ix, iy, iz) / (1.0_num + a) vz(ix, iy, iz) = vz(ix, iy, iz) / (1.0_num + a) END DO END DO END DO END IF IF (down == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO iy = -2, ndy DO ix = -2, nx+2 a = dt * REAL((ndy - iy), num) / REAL(ndy, num) vx(ix, iy, 
iz) = vx(ix, iy, iz) / (1.0_num + a) vy(ix, iy, iz) = vy(ix, iy, iz) / (1.0_num + a) vz(ix, iy, iz) = vz(ix, iy, iz) / (1.0_num + a) END DO END DO END DO END IF IF (back == MPI_PROC_NULL) THEN DO iz = nz-ndz, nz+2 DO iy = -2, ny+2 DO ix = -2, nx+2 a = dt * REAL(iz - (nz-ndz), num) / REAL(ndz, num) vx(ix, iy, iz) = vx(ix, iy, iz) / (1.0_num + a) vy(ix, iy, iz) = vy(ix, iy, iz) / (1.0_num + a) vz(ix, iy, iz) = vz(ix, iy, iz) / (1.0_num + a) END DO END DO END DO END IF IF (front == MPI_PROC_NULL) THEN DO iz = -2, ndz DO iy = -2, ny+2 DO ix = -2, nx+2 a = dt * REAL((ndz - iz), num) / REAL(ndz, num) vx(ix, iy, iz) = vx(ix, iy, iz) / (1.0_num + a) vy(ix, iy, iz) = vy(ix, iy, iz) / (1.0_num + a) vz(ix, iy, iz) = vz(ix, iy, iz) / (1.0_num + a) END DO END DO END DO END IF END IF END SUBROUTINE damp_boundaries SUBROUTINE bfield_bcs CALL bfield_MPI IF (front == MPI_PROC_NULL .AND. zbc_front == BC_OTHER) THEN bx(:, :, -1) = bx(:, :, 2) bx(:, :, 0) = bx(:, :, 1) by(:, :, -1) = by(:, :, 2) by(:, :, 0) = by(:, :, 1) bz(:, :, -1) = bz(:, :, 1) bz(:, :, -2) = bz(:, :, 2) END IF IF (back == MPI_PROC_NULL .AND. zbc_back == BC_OTHER) THEN bx(:, :, nz+1) = bx(:, :, nz ) bx(:, :, nz+2) = bx(:, :, nz-1) by(:, :, nz+1) = by(:, :, nz ) by(:, :, nz+2) = by(:, :, nz-1) bz(:, :, nz+1) = bz(:, :, nz-1) bz(:, :, nz+2) = bz(:, :, nz-2) END IF IF (right == MPI_PROC_NULL .AND. xbc_right == BC_OTHER) THEN bx(nx+1, :, :) = bx(nx-1, :, :) bx(nx+2, :, :) = bx(nx-2, :, :) by(nx+1, :, :) = by(nx , :, :) by(nx+2, :, :) = by(nx-1, :, :) bz(nx+1, :, :) = bz(nx , :, :) bz(nx+2, :, :) = bz(nx-1, :, :) END IF IF (left == MPI_PROC_NULL .AND. xbc_left == BC_OTHER) THEN bx(-1, :, :) = bx(1, :, :) bx(-2, :, :) = bx(2, :, :) by( 0, :, :) = by(1, :, :) by(-1, :, :) = by(2, :, :) bz( 0, :, :) = bz(1, :, :) bz(-1, :, :) = bz(2, :, :) END IF IF (down == MPI_PROC_NULL .AND. 
ybc_down == BC_OTHER) THEN bx(:, 0, :) = bx(:, 1, :) bx(:, -1, :) = bx(:, 2, :) by(:, -1, :) = by(:, 1, :) by(:, -2, :) = by(:, 2, :) bz(:, 0, :) = bz(:, 1, :) bz(:, -1, :) = bz(:, 2, :) END IF IF (up == MPI_PROC_NULL .AND. ybc_up == BC_OTHER) THEN bx(:, ny+1, :) = bx(:, ny , :) bx(:, ny+2, :) = bx(:, ny-1, :) by(:, ny+1, :) = by(:, ny-1, :) by(:, ny+2, :) = by(:, ny-2, :) bz(:, ny+1, :) = bz(:, ny , :) bz(:, ny+2, :) = bz(:, ny-1, :) END IF END SUBROUTINE bfield_bcs SUBROUTINE energy_bcs CALL energy_MPI IF (front == MPI_PROC_NULL .AND. zbc_front == BC_OTHER) THEN energy(:, :, 0) = energy(:, :, 1) energy(:, :, -1) = energy(:, :, 2) END IF IF (back == MPI_PROC_NULL .AND. zbc_back == BC_OTHER) THEN energy(:, :, nz+1) = energy(:, :, nz ) energy(:, :, nz+2) = energy(:, :, nz-1) END IF IF (right == MPI_PROC_NULL .AND. xbc_right == BC_OTHER) THEN energy(nx+1, :, :) = energy(nx , :, :) energy(nx+2, :, :) = energy(nx-1, :, :) END IF IF (left == MPI_PROC_NULL .AND. xbc_left == BC_OTHER) THEN energy( 0, :, :) = energy(1, :, :) energy(-1, :, :) = energy(2, :, :) END IF IF (down == MPI_PROC_NULL .AND. ybc_down == BC_OTHER) THEN energy(:, 0, :) = energy(:, 1, :) energy(:, -1, :) = energy(:, 2, :) END IF IF (up == MPI_PROC_NULL .AND. ybc_up == BC_OTHER) THEN energy(:, ny+1, :) = energy(:, ny , :) energy(:, ny+2, :) = energy(:, ny-1, :) END IF END SUBROUTINE energy_bcs SUBROUTINE velocity_bcs CALL velocity_MPI IF (front == MPI_PROC_NULL .AND. zbc_front == BC_OTHER) THEN vx(:, :, -2:0) = 0.0_num vy(:, :, -2:0) = 0.0_num vz(:, :, -2:0) = 0.0_num END IF IF (back == MPI_PROC_NULL .AND. zbc_back == BC_OTHER) THEN vx(:, :, nz:nz+2) = 0.0_num vy(:, :, nz:nz+2) = 0.0_num vz(:, :, nz:nz+2) = 0.0_num END IF IF (right == MPI_PROC_NULL .AND. xbc_right == BC_OTHER) THEN vx(nx:nx+2, :, :) = 0.0_num vy(nx:nx+2, :, :) = 0.0_num vz(nx:nx+2, :, :) = 0.0_num END IF IF (left == MPI_PROC_NULL .AND. 
xbc_left == BC_OTHER) THEN vx(-2:0, :, :) = 0.0_num vy(-2:0, :, :) = 0.0_num vz(-2:0, :, :) = 0.0_num END IF IF (up == MPI_PROC_NULL .AND. ybc_up == BC_OTHER) THEN vx(:, ny:ny+2, :) = 0.0_num vy(:, ny:ny+2, :) = 0.0_num vz(:, ny:ny+2, :) = 0.0_num END IF IF (down == MPI_PROC_NULL .AND. ybc_down == BC_OTHER) THEN vx(:, -2:0, :) = 0.0_num vy(:, -2:0, :) = 0.0_num vz(:, -2:0, :) = 0.0_num END IF END SUBROUTINE velocity_bcs SUBROUTINE remap_v_bcs CALL remap_v_MPI IF (front == MPI_PROC_NULL .AND. zbc_front == BC_OTHER) THEN vx1(:, :, -2:0) = 0.0_num vy1(:, :, -2:0) = 0.0_num vz1(:, :, -2:0) = 0.0_num END IF IF (back == MPI_PROC_NULL .AND. zbc_back == BC_OTHER) THEN vx1(:, :, nz:nz+2) = 0.0_num vy1(:, :, nz:nz+2) = 0.0_num vz1(:, :, nz:nz+2) = 0.0_num END IF IF (right == MPI_PROC_NULL .AND. xbc_right == BC_OTHER) THEN vx1(nx:nx+2, :, :) = 0.0_num vy1(nx:nx+2, :, :) = 0.0_num vz1(nx:nx+2, :, :) = 0.0_num END IF IF (left == MPI_PROC_NULL .AND. xbc_left == BC_OTHER) THEN vx1(-2:0, :, :) = 0.0_num vy1(-2:0, :, :) = 0.0_num vz1(-2:0, :, :) = 0.0_num END IF IF (up == MPI_PROC_NULL .AND. ybc_up == BC_OTHER) THEN vx1(:, ny:ny+2, :) = 0.0_num vy1(:, ny:ny+2, :) = 0.0_num vz1(:, ny:ny+2, :) = 0.0_num END IF IF (down == MPI_PROC_NULL .AND. ybc_down == BC_OTHER) THEN vx1(:, -2:0, :) = 0.0_num vy1(:, -2:0, :) = 0.0_num vz1(:, -2:0, :) = 0.0_num END IF END SUBROUTINE remap_v_bcs SUBROUTINE density_bcs CALL density_MPI IF (front == MPI_PROC_NULL .AND. zbc_front == BC_OTHER) THEN rho(:, :, -1) = rho(:, :, 2) rho(:, :, 0) = rho(:, :, 1) END IF IF (back == MPI_PROC_NULL .AND. zbc_back == BC_OTHER) THEN rho(:, :, nz+1) = rho(:, :, nz ) rho(:, :, nz+2) = rho(:, :, nz-1) END IF IF (right == MPI_PROC_NULL .AND. xbc_right == BC_OTHER) THEN rho(nx+1, :, :) = rho(nx , :, :) rho(nx+2, :, :) = rho(nx-1, :, :) END IF IF (left == MPI_PROC_NULL .AND. xbc_left == BC_OTHER) THEN rho( 0, :, :) = rho(1, :, :) rho(-1, :, :) = rho(2, :, :) END IF IF (down == MPI_PROC_NULL .AND. 
ybc_down == BC_OTHER) THEN rho(:, 0, :) = rho(:, 1, :) rho(:, -1, :) = rho(:, 2, :) END IF IF (up == MPI_PROC_NULL .AND. ybc_up == BC_OTHER) THEN rho(:, ny+1, :) = rho(:, ny , :) rho(:, ny+2, :) = rho(:, ny-1, :) END IF END SUBROUTINE density_bcs END MODULE boundary Lare3d/src/._control.f90000644 000765 000024 00000000273 11430531260 015265 0ustar00Tonystaff000000 000000 Mac OS X  2‰»TxMtATTR;…&»˜#˜#com.macromates.caret{ column = 48; line = 83; }Lare3d/src/control.f90000644 000765 000024 00000015771 11430531260 015061 0ustar00Tonystaff000000 000000 MODULE control USE shared_data USE normalise IMPLICIT NONE PRIVATE PUBLIC :: user_normalisation, control_variables, set_output_dumps CONTAINS SUBROUTINE user_normalisation ! Set the normalising constants for LARE ! This is needed to allow the use of some physics ! Modules which are coded in SI units ! Should the code use SI units SI = .FALSE. ! Gamma is the ratio of specific heat capacities gamma = 5.0_num / 3.0_num ! Average mass of an ion in proton masses ! The code assumes a single ion species with this mass mf = 1.0_num ! The equations describing the normalisation in LARE ! Have three free parameters which must be specified by ! The end user. These must be the normailisation used for ! your initial conditions if not running in SI, otherwise ! they are arbitrary. ! Magnetic field normalisation in Tesla B0 = 2.e-3_num ! Length normalisation in m L0 = 1.e6_num ! Density normalisation in kg / m^3 RHO0 = 1.6726e-12_num END SUBROUTINE user_normalisation SUBROUTINE control_variables ! Set the number of gridpoints in x and y directions nx_global = 128 ny_global = 128 nz_global = 256 ! Set the maximum number of iterations of the core solver before the code ! terminates. If nsteps < 0 then the code will run until t = t_end nsteps = 60 ! The maximum runtime of the code ! If SI_Input is true then this is in seconds t_end = 300.0_num ! 
Shock viscosities as detailed in manual - they are dimensionless visc1 = 0.1_num visc2 = 0.5_num ! Real viscosity expressed as the inverse Reynolds number, i.e. the ! same for normalised and SI input visc3 = 0.0_num ! Set these constants to manually ! override the domain decomposition. ! If either constant is set to zero ! then the code will try to automatically ! decompose in this direction nprocx = 0 nprocy = 0 nprocz = 0 ! The length of the domain in the x direction ! If SI_Input is true then this is in metres x_start = -2.0_num x_end = 2.0_num ! Should the x grid be stretched or uniform x_stretch = .FALSE. ! The length of the domain in the y direction ! If SI_Input is true then this is in metres y_start = -2.0_num y_end = 2.0_num ! Should the y grid be stretched of uniform y_stretch = .FALSE. z_start = -10.0_num z_end = 10.0_num z_stretch = .FALSE. ! Turn on or off the resistive parts of the MHD equations resistive_mhd = .TRUE. ! The background resistivity expressed as the inverse Lundquist ! number, i.e. the same for normalised and SI input eta_background = 1.e-4_num ! The critical current for triggering anomalous resistivity ! and the resistivity when above the critical current ! The resistivity is expressed as the inverse Lundquist number, i.e. the ! same for normalised and SI input, bit the j_max must be in SI ! if using SI units j_max = 5.0_num eta0 = 1.e-3_num ! Turn on or off the hall_mhd term in the MHD equations ! Well actually this does nothing as it isn't fully ! included yet! Sorry! hall_mhd = .FALSE. ! Set the ion skin depth. If SI_Input is true then ! This is in metres. Note that this should be fixed to ! (the speed of light) / (ion plasma frequency) with the ! plasma frequncy fixed by the chosen normalisation of ! density (through the normalising mass density). Here ! it is treated as a free pararmeter so be careful! lambda0 = 0.0_num ! Turn on or off the Braginskii thermal conduction term in ! the MHD equations ! WARNING: this is not robust. 
It is known to have problems ! with steep temperature gradients and very hot regions with ! large thermal conductivity. For many problems it is however ! fine. conduction = .FALSE. ! Apply a flux limiter to stop heat flows exceeding free streaming limit ! this is an experimental feature heat_flux_limiter = .FALSE. ! Fraction of free streaming heat flux used in limiter flux_limiter = 0.01_num ! Remap kinetic energy correction. LARE does not ! perfectly conserve kinetic energy during the remap step ! This missing energy can be added back into the simulation ! as a uniform heating. Turning rke to true turns on this ! addition rke = .TRUE. ! The code to choose the initial conditions. The valid choices are ! IC_NEW - Use set_initial_conditions in "initial_conditions.f90" ! to setup new initial conditions ! IC_RESTART - Load the output file with index restart_snapshot and ! use it as the initial conditions initial = IC_NEW restart_snapshot = 1 ! Turn on or off the physics package dealing with partial ionisation ! If include_neutrals is true then the code will calculate the ! ionisation fraction of the plasma at each point in the domain include_neutrals = .FALSE. ! If cowling_resistivity is true then the code calculates and ! applies the Cowling Resistivity to the MHD equations cowling_resistivity = .FALSE. ! Set the boundary conditions on the four edges of the simulation domain ! Valid constants are ! BC_PERIODIC - Periodic boundary conditions ! BC_OPEN - Reimann characteristic boundary conditions ! BC_OTHER - Other boundary conditions specified in "boundary.f90" xbc_left = BC_OTHER xbc_right = BC_OTHER ybc_up = BC_OTHER ybc_down = BC_OTHER zbc_front = BC_OTHER zbc_back = BC_OTHER ! set to true to turn on routine for damped boundaries damping = .FALSE. ! Set the equation of state. Valid choices are ! EOS_IDEAL - Simple ideal gas for perfectly ionised plasma ! EOS_PI - Simple ideal gas for partially ionised plasma ! 
EOS_ION - EOS_PI plus the ionisation potential eos_number = EOS_IDEAL END SUBROUTINE control_variables SUBROUTINE set_output_dumps ! The output directory for the code data_dir = "Data" ! The interval between output snapshots. If SI_Input is true ! Then this is in seconds dt_snapshots = 1.0_num ! dump_mask is an array which specifies which quantities the ! code should output to disk in a data dump. ! The codes are ! 1 - rho ! 2 - energy ! 3 - vx ! 4 - vy ! 5 - vz ! 6 - bx ! 7 - by ! 8 - bz ! 9 - temperature ! 10 - pressure ! 11 - cs (sound speed) ! 12 - parallel_current ! 13 - perp_current ! 14 - neutral_faction ! 15 - eta_perp ! 16 - eta ! 17 - jx ! 18 - jy ! 19 - jz ! If a given element of dump_mask is true then that field is dumped ! If the element is false then the field isn't dumped ! N.B. if dump_mask(1:8) not true then the restart will not work dump_mask = .FALSE. dump_mask(1:11) = .TRUE. IF (include_neutrals) dump_mask(14) = .TRUE. IF (resistive_mhd) dump_mask(16:20) = .TRUE. IF (cowling_resistivity) dump_mask(15) = .TRUE. END SUBROUTINE set_output_dumps END MODULE control Lare3d/src/core/000755 000765 000024 00000000000 11430520723 014000 5ustar00Tonystaff000000 000000 Lare3d/src/diagnostics.F90000644 000765 000024 00000040273 11430520723 015645 0ustar00Tonystaff000000 000000 !*** ! Controls all I/O and diagnostics. Output files are 'lare2d.dat', ! 'control.dat', 'en.dat' and a series of snapshots in 'fort.5x' ! The idl package in 'plot.pro' gives simple loading and surface ! plotting based on these files. This isn't documented but is very simple! !*** MODULE diagnostics USE shared_data; USE boundary; USE normalise USE eos USE output_cartesian USE output USE iocontrol IMPLICIT NONE PRIVATE PUBLIC :: set_dt, output_routines, energy_correction CONTAINS SUBROUTINE output_routines(i) ! i = step index ! 
if halt set to false then code stops INTEGER, INTENT(IN) :: i INTEGER, PARAMETER :: out = 1000 INTEGER, SAVE :: index = 1, step = 1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: data LOGICAL :: print_arrays, last_call REAL(num), DIMENSION(3) :: stagger = 0.0_num INTEGER, DIMENSION(3) :: dims ! this output routine uses the same sturcture as needed for mpi output ! this is more complicated than need for the serial code ! rank always equals zero in this serial code CHARACTER(LEN = 9+data_dir_max_length+n_zeros) :: filename CHARACTER(LEN = 35) :: filename_desc REAL(num) :: t_out = 0.0_num REAL(num) :: en_ke = 0.0_num, en_int = 0.0_num REAL(num) :: en_b = 0.0_num, heating_visc = 0.0_num REAL(num) :: heating_ohmic = 0.0_num REAL(num) :: total dims = (/ nx_global+1, ny_global+1, nz_global+1 /) IF (nsteps >= out) step = nsteps / out + 1 ! make sure output fits arrays IF (i == 0 .AND. rank == 0) THEN ! done just once at the start CALL output_log IF (.NOT. restart) WRITE(30) num, 6 END IF IF (MOD(i, step) .EQ. 0 .OR. last_call) THEN ! do every (step) steps t_out = time CALL energy_account(en_b, en_ke, en_int) CALL MPI_ALLREDUCE(total_visc_heating, total, 1, mpireal, MPI_SUM, & comm, errcode) heating_visc = total CALL MPI_ALLREDUCE(total_ohmic_heating, total, 1, mpireal, MPI_SUM, & comm, errcode) heating_ohmic = total IF (rank .EQ. 0) THEN WRITE(30) t_out, en_b, en_ke, en_int WRITE(30) heating_visc, heating_ohmic END IF index = index + 1 END IF CALL io_test(i, print_arrays, last_call) ! check if snapshot is needed print_arrays = .FALSE. IF (print_arrays) THEN ! output a snapshot of arrays IF (rank .EQ. 0) THEN WRITE(20, *) "Dumping ", output_file, " at time", time * t0 CALL FLUSH(20) END IF ! 
Set the filename WRITE(filename_desc, '("(''nfs:'', a, ''/'', i", i3.3, ".", i3.3, & & ", ''.cfd'')")'), n_zeros, n_zeros WRITE(filename, filename_desc) TRIM(data_dir), output_file CALL cfd_open(filename, rank, comm, MPI_MODE_CREATE + MPI_MODE_WRONLY) CALL cfd_write_snapshot_data(REAL(time * t0,dbl), i, 0) ALLOCATE(data(0:nx, 0:ny, 0:nz)) CALL cfd_write_3d_cartesian_grid("Grid", "Grid", & xb_global(0:nx_global) * L0, yb_global(0:ny_global) * L0, & zb_global(0:nz_global) * L0, 0) IF (dump_mask(1)) THEN data = rho(0:nx, 0:ny, 0:nz) * rho0 CALL cfd_write_3d_cartesian_variable_parallel("Rho", "Fluid", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(2)) THEN data = energy(0:nx, 0:ny, 0:nz) * energy0 CALL cfd_write_3d_cartesian_variable_parallel("Energy", "Fluid", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(3)) THEN data = vx(0:nx, 0:ny, 0:nz) * vel0 CALL cfd_write_3d_cartesian_variable_parallel("Vx", "Velocity", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(4)) THEN data = vy(0:nx, 0:ny, 0:nz) * vel0 CALL cfd_write_3d_cartesian_variable_parallel("Vy", "Velocity", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(5)) THEN data = vz(0:nx, 0:ny, 0:nz) * vel0 CALL cfd_write_3d_cartesian_variable_parallel("Vz", "Velocity", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(6)) THEN data = bx(0:nx, 0:ny, 0:nz) * B0 CALL cfd_write_3d_cartesian_variable_parallel("Bx", "Magnetic_Field", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(7)) THEN data = by(0:nx, 0:ny, 0:nz) * B0 CALL cfd_write_3d_cartesian_variable_parallel("By", "Magnetic_Field", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(8)) THEN data = bz(0:nx, 0:ny, 0:nz) * B0 CALL cfd_write_3d_cartesian_variable_parallel("Bz", "Magnetic_Field", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(9)) THEN DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx CALL 
get_temp(rho(ix, iy, iz), energy(ix, iy, iz), & eos_number, ix, iy, iz, data(ix, iy, iz)) END DO END DO END DO data = data * temp0 CALL cfd_write_3d_cartesian_variable_parallel("Temperature", "Fluid", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(10)) THEN DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx CALL get_pressure(rho(ix, iy, iz), energy(ix, iy, iz), & eos_number, ix, iy, iz, data(ix, iy, iz)) END DO END DO END DO data = data * pressure0 CALL cfd_write_3d_cartesian_variable_parallel("Pressure", "Fluid", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(11)) THEN data = SQRT(gamma*(gamma - 1.0_num) * energy(0:nx,0:ny,0:nz)) * vel0 CALL cfd_write_3d_cartesian_variable_parallel("cs", "Fluid", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(12)) THEN data = parallel_current(0:nx, 0:ny, 0:nz) * j0 CALL cfd_write_3d_cartesian_variable_parallel("j_par", "PIP", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(13)) THEN data = perp_current(0:nx, 0:ny, 0:nz) * j0 CALL cfd_write_3d_cartesian_variable_parallel("j_perp", "PIP", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(14)) THEN data = xi_n(0:nx, 0:ny, 0:nz) CALL cfd_write_3d_cartesian_variable_parallel("neutral_fraction", & "PIP", dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(15)) THEN data = eta_perp(0:nx, 0:ny, 0:nz) * res0 CALL cfd_write_3d_cartesian_variable_parallel("eta_perp", "PIP", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(16)) THEN data = eta(0:nx, 0:ny, 0:nz) * res0 CALL cfd_write_3d_cartesian_variable_parallel("eta", "PIP", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(17)) THEN data = jx_r(0:nx, 0:ny, 0:nz) * j0 CALL cfd_write_3d_cartesian_variable_parallel("jx", "current", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(18)) THEN data = jy_r(0:nx, 0:ny, 0:nz) * j0 CALL cfd_write_3d_cartesian_variable_parallel("jy", 
"current", & dims, stagger, "Grid", "Grid", data, subtype) END IF IF (dump_mask(19)) THEN data = jz_r(0:nx, 0:ny, 0:nz) * j0 CALL cfd_write_3d_cartesian_variable_parallel("jz", "current", & dims, stagger, "Grid", "Grid", data, subtype) END IF #ifdef PARALLEL_DEBUG data = rank CALL cfd_write_3d_cartesian_variable_parallel("rank", "parallel", & dims, stagger, "Grid", "Grid", data, subtype) #endif ! Close the file CALL cfd_close() output_file = output_file + 1 END IF IF (last_call .AND. rank == 0) THEN ! output energy diagnostics etc WRITE(20, *) 'final nsteps / time =', i, time * t0 END IF END SUBROUTINE output_routines SUBROUTINE io_test(i, print_arrays, last_call) INTEGER, INTENT(IN) :: i LOGICAL, INTENT(OUT) :: print_arrays, last_call REAL(num), SAVE :: t1 = 0.0_num IF (restart) THEN t1 = time restart = .FALSE. END IF print_arrays = .FALSE. last_call = .FALSE. IF (time >= t1) THEN print_arrays = .TRUE. t1 = t1 + dt_snapshots END IF IF (time >= t_end .OR. i == nsteps) THEN last_call = .TRUE. print_arrays = .TRUE. END IF END SUBROUTINE io_test SUBROUTINE set_dt ! sets CFL limited step ! Assumes all variables are defined at the same point. Be careful ! with setting 'dt_multiplier' if you expect massive changes across ! cells. REAL(num) :: cons, dt1, dt3, dt4, dt5, dt_local, dxlocal REAL(num) :: dtr_local, dth_local, cs dt_local = largest_number dtr_local = largest_number dth_local = largest_number cons = gamma * (gamma - 1.0_num) DO iz = -1, nz+2 izm = iz - 1 DO iy = -1, ny+2 iym = iy - 1 DO ix = -1, nx+2 ixm = ix - 1 w1 = bx(ix, iy, iz)**2 + by(ix, iy, iz)**2 + bz(ix, iy, iz)**2 cs = cons * energy(ix,iy,iz) ! sound speed squared w2 = SQRT(cs + w1 / MAX(rho(ix, iy, iz), none_zero)) & + 2.0_num * SQRT(p_visc(ix, iy, iz) & / MAX(rho(ix, iy, iz), none_zero)) w2 = w2 * (1.0_num + visc1) ! 
find ideal MHD CFL limit dt1 = dxb(ix) / (w2 + ABS(vx(ix, iy, iz))) dt_local = MIN(dt_local, dt1) dt1 = dyb(iy) / (w2 + ABS(vy(ix, iy, iz))) dt_local = MIN(dt_local, dt1) dt1 = dzb(iz) / (w2 + ABS(vz(ix, iy, iz))) dt_local = MIN(dt_local, dt1) ! note resistive limits assumes uniform resistivity hence cautious ! factor 0.2 dxlocal = 1.0_num / (1.0_num / dxb(ix)**2 & + 1.0_num / dyb(iy)**2 + 1.0_num / dzb(iz)**2) IF (cowling_resistivity) THEN dt3 = 0.2_num * dxlocal & / MAX(MAX(eta(ix, iy, iz), eta_perp(ix, iy, iz)), none_zero) ELSE dt3 = 0.2_num * dxlocal / MAX(eta(ix, iy, iz), none_zero) END IF ! Hall MHD CFL limit dt4 = 0.75_num * rho(ix, iy, iz) * MIN(dxb(ix), dyb(iy), dzb(iz))**2 & / MAX(lambda_i(ix, iy, iz) * SQRT(w1), none_zero) ! adjust to accomodate resistive effects dtr_local = MIN(dtr_local, dt3) dth_local = MIN(dth_local, dt4) ! correct to stop overlapping of Lagrangian cells w1 = ABS(vx(ix, iy, iz) - vx(ixm, iy, iz)) / dxb(ix) & + ABS(vy(ix, iy, iz) - vy(ix, iym, iz)) / dyb(iy) & + ABS(vz(ix, iy, iz) - vz(ix, iy, izm)) / dzb(iz) dt5 = 1.0_num / MAX(w1, none_zero) dt_local = MIN(dt_local, dt5) END DO END DO END DO CALL MPI_ALLREDUCE(dt_local, dt, 1, mpireal, MPI_MIN, comm, errcode) CALL MPI_ALLREDUCE(dtr_local, dtr, 1, mpireal, MPI_MIN, comm, errcode) CALL MPI_ALLREDUCE(dth_local, dth, 1, mpireal, MPI_MIN, comm, errcode) dtr = dt_multiplier * dtr dth = dt_multiplier * dth dt = dt_multiplier * dt time = time + dt END SUBROUTINE set_dt SUBROUTINE energy_account(energy_b, energy_ke, energy_int) REAL(num), INTENT(OUT) :: energy_b, energy_ke, energy_int REAL(dbl) :: energy_b_local, energy_ke_local, energy_int_local REAL(num) :: cv_v, rho_v, a, b, c energy_b_local = 0.0_dbl energy_ke_local = 0.0_dbl energy_int_local = 0.0_dbl DO iz = 1, nz izm = iz - 1 DO iy = 1, ny iym = iy - 1 DO ix = 1, nx ixm = ix - 1 w2 = (bx(ix, iy, iz)**2 + bx(ixm, iy, iz)**2) / 2.0_dbl w3 = (by(ix, iy, iz)**2 + by(ix, iym, iz)**2) / 2.0_dbl w4 = (bz(ix, iy, iz)**2 + bz(ix, iy, izm)**2) 
/ 2.0_dbl w1 = (w2 + w3 + w4) / 2.0_dbl energy_b_local = energy_b_local + w1 * cv(ix, iy, iz) energy_int_local = energy_int_local & + energy(ix, iy, iz) * rho(ix, iy, iz) * cv(ix, iy, iz) END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 ! WARNING the KE is summed on the vertices rho_v = (rho(ix, iy, iz) * cv(ix, iy, iz) & + rho(ixp, iy , iz ) * cv(ixp, iy , iz ) & + rho(ix , iyp, iz ) * cv(ix , iyp, iz ) & + rho(ixp, iyp, iz ) * cv(ixp, iyp, iz ) & + rho(ix , iy , izp) * cv(ix , iy , izp) & + rho(ixp, iy , izp) * cv(ixp, iy , izp) & + rho(ix , iyp, izp) * cv(ix , iyp, izp) & + rho(ixp, iyp, izp) * cv(ixp, iyp, izp)) cv_v = (cv(ix, iy, iz) + cv(ixp, iy, iz) & + cv(ix, iyp, iz ) + cv(ixp, iyp, iz ) & + cv(ix, iy , izp) + cv(ixp, iy , izp) & + cv(ix, iyp, izp) + cv(ixp, iyp, izp)) rho_v = rho_v / cv_v cv_v = cv_v / 8.0_dbl w1 = rho_v * cv_v & * (vx(ix, iy, iz)**2 + vy(ix, iy, iz)**2 + vz(ix, iy, iz)**2) IF ((ix == 0) .OR. (ix == nx)) THEN w1 = w1 / 2.0_dbl END IF IF ((iy == 0) .OR. (iy == ny)) THEN w1 = w1 / 2.0_dbl END IF IF ((iz == 0) .OR. (iz == nz)) THEN w1 = w1 / 2.0_dbl END IF energy_ke_local = energy_ke_local + w1 / 2.0_dbl END DO END DO END DO a = REAL(energy_ke_local, num) b = REAL(energy_b_local, num) c = REAL(energy_int_local, num) CALL MPI_ALLREDUCE(a, energy_ke, 1, mpireal, MPI_SUM, & comm, errcode) CALL MPI_ALLREDUCE(b, energy_b, 1, mpireal, MPI_SUM, & comm, errcode) CALL MPI_ALLREDUCE(c, energy_int, 1, mpireal, MPI_SUM, & comm, errcode) END SUBROUTINE energy_account SUBROUTINE energy_correction delta_ke = -delta_ke WHERE (delta_ke < 0.0_num) delta_ke = 0.0_num delta_ke = delta_ke / (rho * cv) DO iz = 1, nz DO iy = 1, ny DO ix = 1, nx energy(ix, iy, iz) = energy(ix, iy, iz) + delta_ke(ix, iy, iz) END DO END DO END DO CALL energy_bcs END SUBROUTINE energy_correction SUBROUTINE output_log ! 
writes basic data to 'lare3d.dat' WRITE(20, *) 'Density normalisation = ', RHO0, ' kg m^(-3)' WRITE(20, *) 'Specific energy density normalisation = ', ENERGY0, ' K' WRITE(20, *) 'Velocity normalisation = ', VEL0, ' m s^(-1)' WRITE(20, *) 'Magnetic field normalisation = ', B0, ' T' WRITE(20, *) 'Length normalisation = ', L0, ' m' WRITE(20, *) 'Time normalisation = ', T0, ' s' WRITE(20, *) 'Viscosity normalisation = ', VISC0, ' m^2s^(-1)' WRITE(20, *) 'Gravity normalisation = ', GRAV0, ' ms^(-2)' WRITE(20, *) 'Resistivity normalisation = ', RES0, ' m^2s^(-1)' WRITE(20, *) 'Thermal conductivity normalisation = ', KAPPA0, & ' kgm^(-1)s^(-1) ' WRITE(20, *) 'Temperature normalisation =', TEMP0, 'K' WRITE(20, *) 'Pressure normalisation =', PRESSURE0, 'Pa' WRITE(20, *) 'nprocx, nprocy, nproca = ', nprocx, nprocy, nprocz WRITE(20, *) 'nx, ny, nz = ', nx, ny, nz WRITE(20, *) WRITE(20, *) 'length_x = ', length_x * L0 WRITE(20, *) 'length_y = ', length_y * L0 WRITE(20, *) 'length_z = ', length_z * L0 WRITE(20, *) #ifndef Q_MONO WRITE(20, *) 'tensor shock viscosity' #else WRITE(20, *) 'q_mono viscosity' #endif WRITE(20, *) 'linear viscosity coeff = ', visc1 WRITE(20, *) 'quadratic viscosity coeff = ', visc2 WRITE(20, *) 'uniform tensor viscosity coeff = ', visc3 * VISC0 WRITE(20, *) 'j_max = ', j_max * B0 / L0 WRITE(20, *) 'vc = ', vc * B0 / L0 WRITE(20, *) 'eta0 = ', eta0 * RES0 WRITE(20, *) 'eta_background = ', eta_background * RES0 WRITE(20, *) 'kappa = ', kappa_0 * KAPPA0 WRITE(20, *) WRITE(20, *) 't_start, t_end = ', time * T0, t_end * T0 WRITE(20, *) 'nsteps =', nsteps WRITE(20, *) END SUBROUTINE output_log END MODULE diagnostics Lare3d/src/initial_conditions.f90000644 000765 000024 00000011736 11430520723 017262 0ustar00Tonystaff000000 000000 MODULE initial_conditions USE shared_data USE eos USE neutral IMPLICIT NONE PRIVATE PUBLIC:: set_initial_conditions CONTAINS !--------------------------------------------------------------------------- ! 
This function sets up the initial condition for the code ! The variables which must be set are ! Rho - density ! V{x, y, z} - Velocities in x, y, z ! B{x, y, z} - Magnetic fields in x, y, z ! Energy - Specific internal energy ! Since temperature is a more intuitive quantity than specific internal energy ! There is a helper function get_energy which converts temperature to energy ! The syntax for this function is ! ! CALL get_energy(density, temperature, equation_of_state, ix, iy, & ! output_energy) ! ! REAL(num) :: density - The density at point (ix, iy) on the grid ! REAL(num) :: temperature - The temperature at point (ix, iy) on the grid ! INTEGER :: equation_of_state - The code for the equation of state to use. ! The global equation of state for the code is eos_number ! INTEGER :: ix - The current gridpoint in the x direction ! INTEGER :: iy - The current gridpoint in the y direction ! REAL(num) :: output_energy - The specific internal energy returned by ! the routine !--------------------------------------------------------------------------- SUBROUTINE set_initial_conditions INTEGER :: ix, iy ,iz REAL(num) :: alpha1, alpha2, B1 REAL(num) :: rc, rb, rbx, rby, b_theta, b_z, delta, B2, C2 REAL(num) :: k, amp, dx, dy, theta, v_perp, v_r, v_theta REAL(num) :: costh, sinth, coskz, sinkz, arg vx = 0.0_num vy = 0.0_num vz = 0.0_num rho = 1.0_num ! energy = 0.0_num energy = 1.30124e-4_num !For temperature = 1.0e4 Kelvin grav = 0.0_num ! Velocity perturbation k = 6.0_num*pi/20.0_num amp = 1.e-4_num dx = 0.1_num * dxb(nx/2) dy = 0.1_num * dyb(ny/2) DO ix = -1,nx+2 DO iy = -1,ny+2 DO iz = -1,nz+2 rc = SQRT(xc(ix)*xc(ix)+yc(iy)*yc(iy)) rb = SQRT(xb(ix)*xb(ix)+yb(iy)*yb(iy)) rbx = SQRT(xb(ix)*xb(ix)+yc(iy)*yc(iy)) rby = SQRT(xc(ix)*xc(ix)+yb(iy)*yb(iy)) IF (rb .LE. 
sqrt(dx**2 + dy**2)) THEN costh = 1.0_num sinth = 0.0_num ELSE costh = xb(ix)/rb sinth = yb(iy)/rb ENDIF coskz = cos(k*zb(iz)) sinkz = sin(k*zb(iz)) v_r = EXP(-rb**2*(1+(rb/0.5_num)**6))*COS(pi*zb(iz)/length_z)*& (costh*coskz + sinth*sinkz) ! Define the velocity at the cell boundaries IF (rb .LE. 1.0_num) THEN B2 = alpha2*((1.0_num-rb**2.0_num)**7.0_num-1.0_num)/7.0_num C2 = alpha2*rb*rb*(1.0_num-rb**2.0_num)**6.0_num b_z = sqrt(1.0_num + B2 - C2) b_theta = alpha1*rb*(1.0_num-rb*rb)**3.0_num v_perp = -((b_theta**2 + b_z**2)/& (b_z + k*rc*b_theta))*& (1.0_num - 2.0_num*rb**2 - 8.0_num*(rb/0.5_num)**6)*EXP(-4.0_num*rb**4)*& COS(pi*zb(iz)/length_z)*(sinth*coskz - costh*sinkz) v_theta = b_z*v_perp / (b_z**2 + b_theta**2) vz(ix,iy,iz) = amp*(-b_theta*v_perp / (b_z**2 +& b_theta**2)) vx(ix,iy,iz) = amp*(v_r*costh - v_theta*sinth) vy(ix,iy,iz) = amp*(v_r*sinth + v_theta*costh) ELSE b_z = sqrt(1.0_num - alpha2/7.0_num) b_theta = 0.0_num v_perp = -((b_theta**2 + b_z**2)/& (b_z + k*rb*b_theta))*& (1.0_num - 2.0_num*rb**2 - 8.0_num*(rb/0.5_num)**6)*EXP(-4.0_num*rb**4)*& COS(pi*zb(iz)/length_z)*(sinth*coskz - costh*sinkz) v_theta = b_z*v_perp / (b_z**2 + b_theta**2) vz(ix,iy,iz) = amp*(-b_theta*v_perp / (b_z**2 +& b_theta**2)) vx(ix,iy,iz) = amp*(v_r*costh - v_theta*sinth) vy(ix,iy,iz) = amp*(v_r*sinth + v_theta*costh) ENDIF alpha1 = 1.8_num alpha2 = alpha1*alpha1 B1 = 1.0_num ! Define Bz on face centred at (xc,yc,zb) IF (rc .LE. 1.0_num) THEN B2 = alpha2*((1.0_num-rc**2.0_num)**7.0_num-1.0_num)/7.0_num C2 = alpha2*rc*rc*(1.0_num-rc**2.0_num)**6.0_num bz(ix,iy,iz) = sqrt(1.0_num + B2 - C2) ELSE bz(ix,iy,iz) = sqrt(1.0_num - alpha2/7.0_num) ENDIF ! Define Bx on face centred at (xb,yc,zc) IF (rbx .LE. 1.0_num) THEN b_theta = alpha1*rbx*(1.0_num-rbx*rbx)**3.0_num bx(ix,iy,iz) = -b_theta * yc(iy) / rbx ELSE bx(ix,iy,iz) = 0.0_num ENDIF ! Define By on face centred at (xc,yb,zc) IF (rby .LE. 
1.0_num) THEN b_theta = alpha1*rby*(1.0_num-rby*rby)**3.0_num by(ix,iy,iz) = b_theta * xc(ix) / rby ELSE by(ix,iy,iz) = 0.0_num ENDIF ENDDO ENDDO ENDDO END SUBROUTINE set_initial_conditions END MODULE initial_conditions Lare3d/src/io/000755 000765 000024 00000000000 11406627300 013461 5ustar00Tonystaff000000 000000 Lare3d/src/Old/000755 000765 000024 00000000000 11406627300 013570 5ustar00Tonystaff000000 000000 Lare3d/src/README000644 000765 000024 00000003265 11406627300 013740 0ustar00Tonystaff000000 000000 Lare2D main code directory -------------------------- Contains boundary.f90 control.f90 diagnostics.f90 initial_conditions.f90 ----------------------------------------------------------------- control.f90 control.f90 contains all the code parameters, such as the number of gridpoints, the length of the domain,the physics packages used, parameters for the packages etc. It is subdivided into three subroutines user_normalisation - User sets the parameters used in the code normalisation control_variables - User sets the basic code parameters. See the comments in the code for a full description set_output_dumps - User sets the information to be included in the output dumps from the code. ----------------------------------------------------------------- initial_conditions.f90 initial_conditions.f90 controls the initial conditions for the simulation. It contains only a single subroutine, called "set_initial_conditions", where the user must set the initial conditions for the variables rho - density energy - specific internal energy v{x,y,z} - Velocity in all three directions b{x,y,z} - Magnetic field in all three directions ----------------------------------------------------------------- boundary.f90 boundary.f90 controls the boundary conditions for the code. 
Details on the boundary conditions and how to change them are given in the LARE manual ----------------------------------------------------------------- diagnostics.f90 diagnostics.f90 performs the output of data to the output dump files, and must be changed by the user if additional output data is required. Changing the output dumps is described in the LARE manual -----------------------------------------------------------------Lare3d/src/Old/old_conditions.f90000644 000765 000024 00000033740 11406627300 017126 0ustar00Tonystaff000000 000000 ! This file contains example initial conditions used in previous simulations !kink unstable loop from Hood et a. A&A 2009 SUBROUTINE set_initial_conditions INTEGER:: ix, iy, iz REAL(num) :: hh REAL(num) :: alpha1, alpha2, B1 REAL(num) :: rc, rb, rbx, rby, b_theta, b_z, delta, B2, C2 REAL(num) :: k, amp, dx, dy, theta, v_perp, v_r, v_theta REAL(num) :: costh, sinth, coskz, sinkz, arg vx = 0.0_num vy = 0.0_num vz = 0.0_num ! Case 10 alpha1 = 1.8_num alpha2 = alpha1*alpha1 B1 = 1.0_num ! helicity hh = 0.0_num bx = 0.0_num by = 0.0_num bz = 0.0_num energy = 0.0_num rho = 1.0_num grav = 0.0_num ! Velocity perturbation !Case 3 perturbation k = 6.0_num*pi/20.0_num amp = 1.e-4_num dx = 0.1_num * dxb(nx/2) dy = 0.1_num * dyb(ny/2) DO ix = -1,nx+2 DO iy = -1,ny+2 DO iz = -1,nz+2 rc = SQRT(xc(ix)*xc(ix)+yc(iy)*yc(iy)) rb = SQRT(xb(ix)*xb(ix)+yb(iy)*yb(iy)) rbx = SQRT(xb(ix)*xb(ix)+yc(iy)*yc(iy)) rby = SQRT(xc(ix)*xc(ix)+yb(iy)*yb(iy)) IF (rb .LE. sqrt(dx**2 + dy**2)) THEN costh = 1.0_num sinth = 0.0_num ELSE costh = xb(ix)/rb sinth = yb(iy)/rb ENDIF coskz = cos(k*zb(iz)) sinkz = sin(k*zb(iz)) v_r = EXP(-rb**2*(1+(rb/0.5_num)**6))*COS(pi*zb(iz)/length_z)*& (costh*coskz + sinth*sinkz) ! ! Define the velocity at the cell boundaries ! IF (rb .LE. 
1.0_num) THEN B2 = alpha2*((1.0_num-rb**2.0_num)**7.0_num-1.0_num)/7.0_num C2 = alpha2*rb*rb*(1.0_num-rb**2.0_num)**6.0_num b_z = sqrt(1.0_num + B2 - C2) b_theta = alpha1*rb*(1.0_num-rb*rb)**3.0_num v_perp = -((b_theta**2 + b_z**2)/& (b_z + k*rc*b_theta))*& (1.0_num - 2.0_num*rb**2 - 8.0_num*(rb/0.5_num)**6)*EXP(-4.0_num*rb**4)*& COS(pi*zb(iz)/length_z)*(sinth*coskz - costh*sinkz) v_theta = b_z*v_perp / (b_z**2 + b_theta**2) vz(ix,iy,iz) = amp*(-b_theta*v_perp / (b_z**2 +& b_theta**2)) vx(ix,iy,iz) = amp*(v_r*costh - v_theta*sinth) vy(ix,iy,iz) = amp*(v_r*sinth + v_theta*costh) ELSE b_z = sqrt(1.0_num - alpha2/7.0_num) b_theta = 0.0_num v_perp = -((b_theta**2 + b_z**2)/& (b_z + k*rb*b_theta))*& (1.0_num - 2.0_num*rb**2 - 8.0_num*(rb/0.5_num)**6)*EXP(-4.0_num*rb**4)*& COS(pi*zb(iz)/length_z)*(sinth*coskz - costh*sinkz) v_theta = b_z*v_perp / (b_z**2 + b_theta**2) vz(ix,iy,iz) = amp*(-b_theta*v_perp / (b_z**2 +& b_theta**2)) vx(ix,iy,iz) = amp*(v_r*costh - v_theta*sinth) vy(ix,iy,iz) = amp*(v_r*sinth + v_theta*costh) ENDIF ! ! Define Bz on face centred at (xc,yc,zb) ! IF (rc .LE. 1.0_num) THEN B2 = alpha2*((1.0_num-rc**2.0_num)**7.0_num-1.0_num)/7.0_num C2 = alpha2*rc*rc*(1.0_num-rc**2.0_num)**6.0_num bz(ix,iy,iz) = sqrt(1.0_num + B2 - C2) ELSE bz(ix,iy,iz) = sqrt(1.0_num - alpha2/7.0_num) ENDIF ! ! Define Bx on face centred at (xb,yc,zc) ! IF (rbx .LE. 1.0_num) THEN b_theta = alpha1*rbx*(1.0_num-rbx*rbx)**3.0_num bx(ix,iy,iz) = -b_theta * yc(iy) / rbx ELSE bx(ix,iy,iz) = 0.0_num ENDIF ! ! Define By on face centred at (xc,yb,zc) ! IF (rby .LE. 1.0_num) THEN b_theta = alpha1*rby*(1.0_num-rby*rby)**3.0_num by(ix,iy,iz) = b_theta * xc(ix) / rby ELSE by(ix,iy,iz) = 0.0_num ENDIF ENDDO ENDDO ENDDO END SUBROUTINE set_initial_conditions ! should give the model atmosphere, with bouyant flux tube, as in Arber et al. 
ApJ SUBROUTINE set_initial_conditions INTEGER :: ix, iy, iz, loop REAL(num) :: rc, x1, y1, b_theta, amp, k, r0, a=1.1_num, r1, mu, m=1.5 REAL(num):: b0, bz0, t_ph=1.0_num, t_cor=150.0_num, z_cor=25.0_num, wtr=5.0_num REAL(num) :: dg, w, q, lambda, r, bphi, b1, p0, p1, rho1, r_a, a1, a2, b REAL(num) :: maxerr, xi_v REAL(num), DIMENSION(:), ALLOCATABLE :: dzb_global, dzc_global,zc_global,grav_global REAL(num), DIMENSION(:), ALLOCATABLE :: rho_ref, energy_ref, t_ref, mu_m ALLOCATE(dzb_global(-1:nz_global+1), dzc_global(-1:nz_global), zc_global(-1:nz_global+1)) ALLOCATE(grav_global(-1:nz_global+2), mu_m(-1:nz_global+2)) ALLOCATE(rho_ref(-1:nz_global+2),energy_ref(-1:nz_global+2), t_ref(-1:nz_global+2)) ! Changed from James's version to remove the need for a seperate routine setting up ! the newton cooling arrays and the MPI calls. ! Set up the initial 1D hydrostatic equilibrium grav_global = 0.9727_num ! example of lowering g to zero in corona a1 = 60.0_num a2 = 80.0_num WHERE (zb_global > a1) grav_global = grav_global(0)*(1.0_num+COS(pi*(zb_global-a1)/(a2-a1))) & /2.0_num WHERE (zb_global > a2) grav_global = 0.0_num !Y.Fan atmosphere temp profile DO iz = -1, nz_global+1 ! needs to be +1 for the dzc calculation zc_global(iz) = 0.5_num * (zb_global(iz) + zb_global(iz-1)) IF (zc_global(iz) < 0.0_num) THEN t_ref(iz) = t_ph - (t_ph * a * zc_global(iz) * grav_global(iz) / (m+1.0_num)) ELSE t_ref(iz) = t_ph + ((t_cor-t_ph) * 0.5_num * (TANH((zc_global(iz)-z_cor)/wtr)+1.0_num)) ENDIF ENDDO t_ref(nz_global+2) = t_ref(nz_global+1) !solve HS eqn to get rho profile !density at bottom of domain rho_ref = 1.0_num DO iz = -1, nz_global dzb_global(iz) = zb_global(iz) - zb_global(iz-1) dzc_global(iz) = zc_global(iz+1) - zc_global(iz) ENDDO !solve for density mu_m = 0.5_num ! 
the reduced mass in units of proton mass IF (include_neutrals) xi_n = 0.0_num DO loop = 1, 100 maxerr = 0.0_num DO iz = nz_global, 0, -1 IF (zc_global(iz) < 0.0_num) THEN dg = 1.0_num / (dzb_global(iz)+dzb_global(iz-1)) rho_ref(iz-1) = rho_ref(iz) * (T_ref(iz)/dzc_global(iz-1)/mu_m(iz)& +grav_global(iz-1)*dzb_global(iz)*dg) rho_ref(iz-1) = rho_ref(iz-1) / (T_ref(iz-1)/dzc_global(iz-1)/mu_m(iz-1) & -grav_global(iz-1)*dzb_global(iz-1)*dg) END IF END DO !Now move from the photosphere up to the corona DO iz = 0, nz_global IF (zc_global(iz) > 0.0_num) THEN dg = 1.0_num / (dzb_global(iz)+dzb_global(iz-1)) rho_ref(iz) = rho_ref(iz-1) * (T_ref(iz-1)/dzc_global(iz-1)/mu_m(iz-1)& -grav_global(iz-1)*dzb_global(iz-1)*dg) rho_ref(iz) = rho_ref(iz) / (T_ref(iz)/dzc_global(iz-1)/mu_m(iz) & +grav_global(iz-1)*dzb_global(iz)*dg) END IF END DO IF (include_neutrals) THEN !note this always assumes EOS_PI DO iz = 0, nz_global xi_v = get_neutral(t_ref(iz), rho_ref(iz)) r1 = mu_m(iz) mu_m(iz) = 1.0_num / (2.0_num - xi_v) maxerr = MAX(maxerr, ABS(mu_m(iz) - r1)) END DO END IF IF (maxerr < 1.e-10_num) EXIT END DO rho_ref(nz_global+1:nz_global+2) = rho_ref(nz_global) ! set the relaxation rate, James used an exponent of -1.67, here I use the value ! in the 2D paper of -1.67. The tau equation is scaled by 1/t0 * 0.1 ! 
Convert into the local 3D arrays vx = 0.0_num vy = 0.0_num vz = 0.0_num grav = grav_global(coordinates(1)*nz-1:coordinates(1)*nz+nz+2) DO iy = -1, ny + 2 DO ix = -1, nx + 2 !store temperature in energy array for a few lines energy(ix,iy,:) = t_ref(coordinates(1)*nz-1:coordinates(1)*nz+nz+2) rho(ix,iy,:) = rho_ref(coordinates(1)*nz-1:coordinates(1)*nz+nz+2) ENDDO ENDDO DO iz = -1, nz + 2 DO iy = -1, ny + 2 DO ix = -1, nx + 2 r1 = energy(ix,iy,iz) CALL get_energy(rho(ix,iy,iz), r1, eos_number, ix, iy, iz, energy(ix,iy,iz)) END DO END DO END DO !add magnetic flux tube at (0,0,-10) and change pressure, dens, energy over it !grad(p1) matches lorentz force !MEQ at end of tube !pressure match at apex !seee Fan 2001 for details of initialisation !w = width of tube w = 2.0_num q = -(1.0_num/w) b0 = 5.0_num lambda = 20.0_num DO ix = -1,nx+2 DO iy = -1,ny+2 DO iz = -1,nz+2 !define bx,by,bz at correct points r = SQRT(yc(iy)**2 + (zc(iz)+10.0_num)**2) bx(ix,iy,iz) = b0 * EXP(-(r/w)**2) bphi = bx(ix,iy,iz) * q * r r = SQRT(yb(iy)**2 + (zc(iz)+10.0_num)**2) b1 = b0 * EXP(-(r/w)**2) by(ix,iy,iz) = -b1 * q * (zc(iz)+10.0_num) r = SQRT(yc(iy)**2 + (zb(iz)+10.0_num)**2) b1 = b0 * EXP(-(r/w)**2) bz(ix,iy,iz) = b1 * q * yc(iy) !define gas pressure and magnetic pressure p0 = rho(ix,iy,iz)*energy(ix,iy,iz)*(gamma-1.0_num) p1 = -0.25_num * bx(ix,iy,iz)**2 - 0.5_num * bphi**2 !change density and energy r1 = xc(ix)/lambda rho1 = (p1/p0)*rho(ix,iy,iz)*EXP(-(r1**2)) rho(ix,iy,iz) = rho(ix,iy,iz) + rho1 energy(ix,iy,iz)= (p0 + p1) / (rho(ix,iy,iz) * (gamma - 1.0_num)) END DO END DO END DO DEALLOCATE(dzb_global, dzc_global, zc_global) DEALLOCATE(grav_global, mu_m) DEALLOCATE(rho_ref, energy_ref, t_ref) END SUBROUTINE set_initial_conditions !Kink unstable loop from Arber et al, 1999 SUBROUTINE equilibrium INTEGER :: ix, iy, iz REAL(num) :: rc, x1, y1, b_theta, amp, k, r0, a, r1, mu REAL(num):: b0, bz0 vx = 0.0_num vy = 0.0_num vz = 0.0_num bx = 1.0_num by = 0.0_num bz = 0.0_num r0 = 1.0_num / 
SQRT(6.0_num) a = 5.0_num / (6.0_num * SQRT(6.0_num)) bz0 = 1.0_num b0 = 4.3_num DO ix = -1, nx+2 ! setup static equilibrium values DO iy = -1, ny+2 DO iz = -1, nz+2 rc = SQRT(xc(ix)**2 + yc(iy)**2) IF (rc >= 1.0_num) rc = 1.0_num bz(ix,iy,iz) = (1.0/2.0)*rc**2 - (3.0/8.0)*(rc**4/r0**2) & + (7.0*a/25.0)*(rc**5/r0**3) + (1.0/12.0)*(rc**6/r0**4) & - (9.0*a/70.0)*(rc**7/r0**5) + (1.0*a**2/20.0)*(rc**8/r0**6) bz(ix,iy,iz) = SQRT(bz0**2 - b0**2*bz(ix,iy,iz)) rho(ix,iy,iz) = 0.45_num*(1.0_num+COS(pi*rc))+0.1_num x1 = xb(ix) y1 = yc(iy) rc = SQRT(x1**2 + y1**2) IF (rc >= 1.0_num) rc = 1.0_num b_theta = rc/2.0 - rc**3/(4.0*r0**2) + a*rc**4/(5.0*r0**3) bx(ix,iy,iz) = - b0 * b_theta * y1 / rc x1 = xc(ix) y1 = yb(iy) rc = SQRT(x1**2 + y1**2) IF (rc >= 1.0_num) rc = 1.0_num b_theta = rc/2.0 - rc**3/(4.0*r0**2) + a*rc**4/(5.0*r0**3) by(ix,iy,iz) = b0 * b_theta * x1 / rc END DO END DO END DO bx(-2,:,:) = bx(-1,:,:) by(:,-2,:) = by(:,-1,:) bz(:,:,-2) = bz(:,:,-1) WHERE (rho < 0.1_num) rho = 0.1_num energy = 0.01_num / (rho * (gamma-1.0_num)) k = 2.0_num * pi / length_z ! apply velocity perturbation amp = 1.e-2_num r1 = 0.95_num mu = 0.2_num DO ix = -1, nx+2 DO iy = -1, ny+2 DO iz = -1, nz+2 rc = SQRT(xb(ix)**2 + yb(iy)**2) IF (rc < r1) THEN vx(ix,iy,iz) = amp*COS(2.5_num*k*zc(iz)) & * (1.0_num+COS(k*zc(iz)))*(1.0_num - (rc/r1)**2)**mu vy(ix,iy,iz) = amp*SIN(2.5_num*k*zc(iz)) & * (1.0_num+COS(k*zc(iz)))*(1.0_num - (rc/r1)**2)**mu ELSE vx(ix,iy,iz) = 0.0_num vy(ix,iy,iz) = 0.0_num END IF END DO END DO END DO END SUBROUTINE equilibrium !example of how to get B from A SUBROUTINE equilibrium INTEGER :: ix, iy, iz REAL(num), DIMENSION(:,:,:), ALLOCATABLE :: ax,ay,az REAL(num) :: mag_scale_height, b0, r, radius ALLOCATE(ax(-2:nx+2,-2:ny+2,-2:nz+2),ay(-2:nx+2,-2:ny+2,-2:nz+2),az(-2:nx+2,-2:ny+2,-2:nz+2)) ax = 0.0_num ay = 0.0_num az = 0.0_num vx = 0.0_num vy = 0.0_num vz = 0.0_num rho = 1.0_num energy = 0.01_num / (gamma - 1.0_num) bx = 0.0_num by = 0.0_num bz = 0.0_num r = 10.0_num ! 
loop major radius, foot points are at +- r b0 = 1.0_num mag_scale_height = 2.0_num * r / pi grav = 0.0_num ! Define the vector potential DO iz = -2, nz + 2 DO ix = -2, nx + 2 ay(ix,:,iz) = b0 * mag_scale_height * COS(xb(ix) / mag_scale_height) * EXP(-zb(iz)/mag_scale_height) END DO END DO ! Take the curl of the vector potential to get B DO iz = -1, nz + 2 DO iy = -1, ny + 2 DO ix = -1, nx + 2 ixm = ix - 1 iym = iy - 1 izm = iz - 1 bx(ix,iy,iz) = (az(ix,iy,iz) - az(ix,iym,iz)) / dyb(iy) - (ay(ix,iy,iz) - ay(ix,iy,izm)) / dzb(iz) by(ix,iy,iz) = (ax(ix,iy,iz) - ax(ix,iy,izm)) / dzb(iz) - (az(ix,iy,iz) - az(ixm,iy,iz)) / dxb(ix) bz(ix,iy,iz) = (ay(ix,iy,iz) - ay(ixm,iy,iz)) / dxb(ix) - (ax(ix,iy,iz) - ax(ix,iym,iz)) / dyb(iy) END DO END DO END DO DO iz = -1, nz + 2 DO iy = -1, ny + 2 iym = iy - 1 izm = iz - 1 bx(-2,iy,iz) = (az(-2,iy,iz) - az(-2,iym,iz)) / dyb(iy) - (ay(-2,iy,iz) - ay(-2,iy,izm)) / dzb(iz) END DO END DO DO iz = -1, nz + 2 DO ix = -1, nx + 2 by(ix,-2,iz) = (ax(ix,-2,iz) - ax(ix,-2,izm)) / dzb(iz) - (az(ix,-2,iz) - az(ixm,-2,iz)) / dxb(ix) END DO END DO DO iy = -1, ny + 2 DO ix = -1, nx + 2 bz(ix,iy,-2) = (ay(ix,iy,-2) - ay(ixm,iy,-2)) / dxb(ix) - (ax(ix,iy,-2) - ax(ix,iym,-2)) / dyb(iy) END DO END DO DEALLOCATE(ax,ay,az) END SUBROUTINE equilibrium Lare3d/src/Old/README000644 000765 000024 00000000444 11406627300 014452 0ustar00Tonystaff000000 000000 This directory contains old code that may prove useful as examples etc. Check in anything you think may be useful but be sure to comment it! 1. 
Arber: added example initial_conditions.f90 to file from old setups that may be useful skeletons for future work and to recover old setups easily.Lare3d/src/io/input.f90000644 000765 000024 00000014176 11406627300 015151 0ustar00Tonystaff000000 000000 MODULE input USE shared_data USE iocommon USE inputfunctions IMPLICIT NONE SAVE CONTAINS SUBROUTINE cfd_open_read(filename) CHARACTER(len = *), INTENT(IN) :: filename CHARACTER(len = 3) :: CFD INTEGER :: file_version, file_revision CALL MPI_BARRIER(cfd_comm, cfd_errcode) CALL MPI_FILE_OPEN(cfd_comm, TRIM(filename), cfd_mode, & MPI_INFO_NULL, cfd_filehandle, cfd_errcode) current_displacement = 0 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) ! Read the header CALL MPI_FILE_READ_ALL(cfd_filehandle, CFD, 3, MPI_CHARACTER, & cfd_status, cfd_errcode) ! If this isn't "CFD" then this isn't a CFD file IF (CFD /= "CFD") THEN CALL MPI_FILE_CLOSE(cfd_filehandle, cfd_errcode) PRINT *, "The specified file is not a valid CFD file" CALL MPI_ABORT(cfd_comm, cfd_errcode) END IF current_displacement = 3 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER, MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) ! Read in the basic file info. Should check version info, but ! this is version 1, so let's not worry about it CALL MPI_FILE_READ_ALL(cfd_filehandle, header_offset, 1, & MPI_INTEGER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, block_header_size, 1, & MPI_INTEGER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, file_version, 1, & MPI_INTEGER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, file_revision, 1, & MPI_INTEGER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, max_string_len, 1, & MPI_INTEGER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, nblocks, 1, MPI_INTEGER, & cfd_status, cfd_errcode) IF (file_version .GT. 
cfd_version) THEN IF (rank == default_rank) PRINT *, "Version number incompatible" CALL MPI_ABORT(cfd_comm, cfd_errcode) END IF IF (file_revision .GT. cfd_revision) THEN IF (rank == default_rank) PRINT *, "Revision number of file is ", & "too high. Writing disabled" cfd_writing = .FALSE. END IF current_displacement = header_offset END SUBROUTINE cfd_open_read SUBROUTINE cfd_get_next_block_info_all(name, class, type) CHARACTER(len = *), INTENT(INOUT) :: name, class CHARACTER(len = max_string_len) :: name_l, class_l INTEGER, INTENT(OUT) :: type INTEGER :: len_name, len_class len_name = LEN(name) len_class = LEN(name) block_header_start = current_displacement CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, name_l, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, class_l, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) current_displacement = current_displacement + 2 * max_string_len CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER, MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, type, 1, MPI_INTEGER, & cfd_status, cfd_errcode) c_block_type = type name = name_l(1:MIN(len_name, max_string_len)) class = class_l(1:MIN(len_class, max_string_len)) current_displacement = current_displacement + 4 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER8, MPI_INTEGER8, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, block_md_length, 1, & MPI_INTEGER8, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, block_length, 1, & MPI_INTEGER8, cfd_status, cfd_errcode) ! 
Skip past the header block current_displacement = block_header_start + block_header_size block_header_end = current_displacement END SUBROUTINE cfd_get_next_block_info_all SUBROUTINE cfd_get_common_meshtype_metadata_all(type, nd, sof) ! Mesh and mesh variables (and other types such as multimat ! objects start in the same way). An integer type and a ! dimensionality, so just have one routine INTEGER, INTENT(INOUT) :: type, nd, sof CALL cfd_skip_block_header() ! Now at start of metadata CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER, MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, type, 1, MPI_INTEGER, & cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, nd, 1, MPI_INTEGER, & cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, sof, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + 3 * soi END SUBROUTINE cfd_get_common_meshtype_metadata_all SUBROUTINE cfd_get_snapshot(time, snap) REAL(KIND = 8), INTENT(OUT) :: time INTEGER, INTENT(OUT) :: snap CALL cfd_skip_block_header() ! 
Now at start of metadata CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER, MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, snap, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_DOUBLE_PRECISION, MPI_DOUBLE_PRECISION, "native", & MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, time, 1, mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_snapshot SUBROUTINE cfd_get_real_constant(value) REAL(num), INTENT(OUT) :: value CALL cfd_skip_block_header() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & mpireal, mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, value, 1, mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_real_constant END MODULE input Lare3d/src/io/input_arb.f90000644 000765 000024 00000002427 11406627300 015771 0ustar00Tonystaff000000 000000 MODULE input_arb USE shared_data USE iocommon USE inputfunctions USE output IMPLICIT NONE CONTAINS ! This subroutine is used to wrap a block containing program specific data ! Which there is no general way of allowing other programs to read ! 
It permits the use of a single string to idenitify the program that wrote it SUBROUTINE cfd_get_arb_block(reader) INTERFACE SUBROUTINE reader(filehandle, current_displacement, generator_name) USE shared_data INTEGER, INTENT(IN) :: filehandle INTEGER(KIND = MPI_OFFSET_KIND), INTENT(IN) :: current_displacement CHARACTER(LEN = *), INTENT(IN) :: generator_name END SUBROUTINE reader END INTERFACE CHARACTER(LEN = max_string_len) :: gen_name CALL cfd_skip_block_header CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, gen_name, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) current_displacement = current_displacement + max_string_len CALL cfd_skip_block_metadata CALL reader(cfd_filehandle, current_displacement, gen_name) CALL cfd_skip_block END SUBROUTINE cfd_get_arb_block END MODULE input_arb Lare3d/src/io/input_cartesian.f90000644 000765 000024 00000020202 11406627300 017165 0ustar00Tonystaff000000 000000 MODULE input_cartesian USE shared_data USE iocommon USE inputfunctions IMPLICIT NONE SAVE CONTAINS ! Grid loading functions SUBROUTINE cfd_get_nd_cartesian_grid_metadata_all(ndims, dims, extents) INTEGER, DIMENSION(:), INTENT(OUT) :: dims REAL(num), DIMENSION(:), INTENT(OUT) :: extents INTEGER, INTENT(IN) :: ndims ! this subroutine MUST be called after the call to ! get_common_mesh_metadata_all or it will break everything CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, dims, ndims, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + ndims * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, extents, ndims*2, mpireal, & cfd_status, cfd_errcode) ! 
After this subroutine, all the metadata should be read in, so to ! make sure, just jump to known start of Data CALL cfd_skip_block_metadata() END SUBROUTINE cfd_get_nd_cartesian_grid_metadata_all SUBROUTINE cfd_get_1d_cartesian_grid_all(x) REAL(num), DIMENSION(:), INTENT(INOUT) :: x INTEGER :: nx CALL cfd_skip_block_metadata() nx = SIZE(x) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, x, nx, mpireal, cfd_status, & cfd_errcode) ! That should be it, so now skip to end of block CALL cfd_skip_block END SUBROUTINE cfd_get_1d_cartesian_grid_all SUBROUTINE cfd_get_2d_cartesian_grid_all(x, y) REAL(num), DIMENSION(:), INTENT(INOUT) :: x, y INTEGER :: nx, ny CALL cfd_skip_block_metadata() nx = SIZE(x) ny = SIZE(y) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, x, nx, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, y, ny, mpireal, cfd_status, & cfd_errcode) ! That should be it, so now skip to end of block CALL cfd_skip_block END SUBROUTINE cfd_get_2d_cartesian_grid_all SUBROUTINE cfd_get_3d_cartesian_grid_all(x, y, z) REAL(num), DIMENSION(:), INTENT(INOUT) :: x, y, z INTEGER :: nx, ny, nz nx = SIZE(x) ny = SIZE(y) nz = SIZE(z) CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, x, nx, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, y, ny, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, z, nz, mpireal, cfd_status, & cfd_errcode) ! That should be it, so now skip to end of block CALL cfd_skip_block END SUBROUTINE cfd_get_3d_cartesian_grid_all ! 
variable loading functions SUBROUTINE cfd_get_nd_cartesian_variable_metadata_all(ndims, dims, & extents, stagger, meshname, meshclass) INTEGER, DIMENSION(:), INTENT(OUT) :: dims REAL(num), DIMENSION(:), INTENT(OUT) :: extents REAL(num), DIMENSION(:), INTENT(OUT) :: stagger INTEGER, INTENT(IN) :: ndims CHARACTER(len = *), INTENT(INOUT) :: meshname, meshclass CHARACTER(len = max_string_len) :: meshname_file, meshclass_file INTEGER :: len_name, len_class len_name = LEN(meshname) len_class = LEN(meshclass) ! This subroutine MUST be called after the call to ! get_common_mesh_metadata_all or it will break everything CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, dims, ndims, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + ndims * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) ! Read grid stagger CALL MPI_FILE_READ_ALL(cfd_filehandle, stagger, ndims, mpireal, & cfd_status, cfd_errcode) ! Read data range CALL MPI_FILE_READ_ALL(cfd_filehandle, extents, 2, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, meshname_file, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, meshclass_file, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) meshname = meshname_file(1:MIN(max_string_len, len_name)) meshclass = meshclass_file(1:MIN(max_string_len, len_class)) ! After this subroutine, all the metadata should be read in, so to ! 
make sure, just jump to known start of Data CALL cfd_skip_block_metadata() END SUBROUTINE cfd_get_nd_cartesian_variable_metadata_all SUBROUTINE cfd_get_1d_cartesian_variable_parallel(variable, subtype) REAL(num), INTENT(IN), DIMENSION(:) :: variable INTEGER, INTENT(IN) :: subtype CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & subtype, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, variable, SIZE(variable), mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_1d_cartesian_variable_parallel SUBROUTINE cfd_get_1d_cartesian_variable_all(variable) REAL(num), INTENT(IN), DIMENSION(:) :: variable CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, variable, SIZE(variable), mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_1d_cartesian_variable_all SUBROUTINE cfd_get_2d_cartesian_variable_parallel(variable, subtype) REAL(num), INTENT(IN), DIMENSION(:, :) :: variable INTEGER, INTENT(IN) :: subtype CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & subtype, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, variable, SIZE(variable), mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_2d_cartesian_variable_parallel SUBROUTINE cfd_get_2d_cartesian_variable_all(variable) REAL(num), INTENT(IN), DIMENSION(:, :) :: variable CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, variable, SIZE(variable), mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_2d_cartesian_variable_all SUBROUTINE cfd_get_3d_cartesian_variable_parallel(variable, 
subtype) REAL(num), INTENT(IN), DIMENSION(:, :, :) :: variable INTEGER, INTENT(IN) :: subtype CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & subtype, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, variable, SIZE(variable), mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_3d_cartesian_variable_parallel SUBROUTINE cfd_get_3d_cartesian_variable_all(variable) REAL(num), INTENT(IN), DIMENSION(:, :, :) :: variable CALL cfd_skip_block_metadata() CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, variable, SIZE(variable), mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block() END SUBROUTINE cfd_get_3d_cartesian_variable_all END MODULE input_cartesian Lare3d/src/io/input_particle.f90000644 000765 000024 00000020047 11406627300 017026 0ustar00Tonystaff000000 000000 MODULE input_particle USE shared_data USE iocommon USE inputfunctions IMPLICIT NONE SAVE CONTAINS ! Grid loading functions SUBROUTINE cfd_get_nd_particle_grid_metadata_all(ndims, coord_type, & npart, extents) INTEGER, INTENT(OUT) :: coord_type INTEGER(8), INTENT(OUT) :: npart REAL(num), DIMENSION(:), INTENT(OUT) :: extents INTEGER, INTENT(IN) :: ndims ! This subroutine MUST be called after the call to ! 
get_common_mesh_metadata_all or it will break everything CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER, MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, coord_type, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi CALL MPI_FILE_READ_ALL(cfd_filehandle, npart, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi8 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, extents, ndims * 2, mpireal, & cfd_status, cfd_errcode) ! After this subroutine, all the metadata should be read in, so to ! make sure, just jump to known start of Data CALL cfd_skip_block_metadata() END SUBROUTINE cfd_get_nd_particle_grid_metadata_all SUBROUTINE cfd_get_nd_particle_grid_all(ndims, npart, data) INTEGER, INTENT(IN) :: ndims INTEGER(KIND = 8), INTENT(IN) :: npart REAL(num), DIMENSION(:, :), INTENT(INOUT) :: data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, data, ndims * npart, mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block END SUBROUTINE cfd_get_nd_particle_grid_all SUBROUTINE cfd_get_nd_particle_grid_parallel(ndims, npart, data, sub_type) INTEGER, INTENT(IN) :: ndims INTEGER, INTENT(IN) :: sub_type INTEGER(KIND = 8), INTENT(IN) :: npart REAL(num), DIMENSION(:, :), INTENT(INOUT) :: data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & sub_type, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, data, ndims * npart, mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block END SUBROUTINE cfd_get_nd_particle_grid_parallel SUBROUTINE cfd_get_nd_particle_grid_parallel_with_iterator(ndims, & npart_local, npart_lglobal, npart_per_it, sof, sub_type, iterator) INTEGER, 
INTENT(IN) :: sub_type INTEGER, INTENT(IN) :: ndims INTEGER, INTENT(IN) :: sof INTEGER(KIND = 8), INTENT(IN) :: npart_local, npart_per_it, npart_lglobal INTEGER(KIND = 8) :: npart_this_it, npart_remain, size_of_stencil INTEGER :: direction LOGICAL :: start REAL(num), DIMENSION(:), ALLOCATABLE :: data INTERFACE SUBROUTINE iterator(data, npart_it, start, direction) USE shared_data REAL(num), DIMENSION(:), INTENT(INOUT) :: data INTEGER(8), INTENT(INOUT) :: npart_it LOGICAL, INTENT(IN) :: start INTEGER, INTENT(IN) :: direction END SUBROUTINE iterator END INTERFACE CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & sub_type, "native", MPI_INFO_NULL, cfd_errcode) ALLOCATE(data(1:npart_per_it)) DO direction = 1, ndims start = .TRUE. npart_remain = npart_local npart_this_it = MIN(npart_remain, npart_per_it) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & sub_type, "native", MPI_INFO_NULL, cfd_errcode) DO WHILE (npart_this_it .GT. 0) CALL MPI_FILE_READ(cfd_filehandle, data, npart_this_it, mpireal, & cfd_status, cfd_errcode) npart_remain = npart_remain - npart_this_it CALL iterator(data, npart_this_it, start, direction) start = .FALSE. npart_this_it = MIN(npart_remain, npart_per_it) END DO current_displacement = current_displacement + npart_lglobal * sof END DO DEALLOCATE(data) CALL MPI_BARRIER(cfd_comm, cfd_errcode) CALL cfd_skip_block END SUBROUTINE cfd_get_nd_particle_grid_parallel_with_iterator ! Grid loading functions SUBROUTINE cfd_get_nd_particle_variable_metadata_all(npart, range, & mesh, mesh_class) INTEGER(8), INTENT(OUT) :: npart REAL(num), DIMENSION(2), INTENT(OUT) :: range CHARACTER(LEN = max_string_len), INTENT(OUT) :: mesh, mesh_class ! This subroutine MUST be called after the call to ! 
get_common_mesh_metadata_all or it will break everything CALL MPI_FILE_READ_ALL(cfd_filehandle, npart, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi8 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, range, 2, mpireal, cfd_status, & cfd_errcode) current_displacement = current_displacement + 2 * num CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, mesh, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, mesh_class, max_string_len, & MPI_CHARACTER, cfd_status, cfd_errcode) ! After this subroutine, all the metadata should be read in, so to ! make sure, just jump to known start of Data CALL cfd_skip_block_metadata() END SUBROUTINE cfd_get_nd_particle_variable_metadata_all SUBROUTINE cfd_get_nd_particle_variable_all(npart, data) INTEGER(KIND = 8), INTENT(IN) :: npart REAL(num), DIMENSION(:), INTENT(INOUT) :: data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, data, npart, mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block END SUBROUTINE cfd_get_nd_particle_variable_all SUBROUTINE cfd_get_nd_particle_variable_parallel(npart_local, data, sub_type) INTEGER, INTENT(IN) :: sub_type INTEGER(KIND = 8), INTENT(IN) :: npart_local REAL(num), DIMENSION(:, :), INTENT(INOUT) :: data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & sub_type, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_READ_ALL(cfd_filehandle, data, npart_local, mpireal, & cfd_status, cfd_errcode) CALL cfd_skip_block END SUBROUTINE cfd_get_nd_particle_variable_parallel SUBROUTINE cfd_get_nd_particle_variable_parallel_with_iterator(npart_local, & 
npart_per_it, sub_type, iterator) INTEGER, INTENT(IN) :: sub_type INTEGER(KIND = 8), INTENT(IN) :: npart_local, npart_per_it INTEGER(KIND = 8) :: npart_this_it, npart_remain LOGICAL :: start REAL(num), DIMENSION(:), ALLOCATABLE :: data INTERFACE SUBROUTINE iterator(data, npart_it, start) USE shared_data REAL(num), DIMENSION(:), INTENT(INOUT) :: data INTEGER(8), INTENT(INOUT) :: npart_it LOGICAL, INTENT(IN) :: start END SUBROUTINE iterator END INTERFACE CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & sub_type, "native", MPI_INFO_NULL, cfd_errcode) start = .TRUE. ALLOCATE(data(1:npart_per_it)) npart_remain = npart_local npart_this_it = MIN(npart_remain, npart_per_it) DO WHILE (npart_this_it .GT. 0) npart_this_it = MIN(npart_remain, npart_per_it) CALL MPI_FILE_READ(cfd_filehandle, data, npart_this_it, mpireal, & cfd_status, cfd_errcode) npart_remain = npart_remain - npart_this_it CALL iterator(data, npart_this_it, start) start = .FALSE. END DO CALL MPI_BARRIER(cfd_comm, cfd_errcode) DEALLOCATE(data) CALL cfd_skip_block END SUBROUTINE cfd_get_nd_particle_variable_parallel_with_iterator END MODULE input_particle Lare3d/src/io/inputfunctions.f90000644 000765 000024 00000001526 11406627300 017075 0ustar00Tonystaff000000 000000 MODULE inputfunctions USE shared_data USE iocommon IMPLICIT NONE SAVE CONTAINS SUBROUTINE cfd_skip_block ! Minimal subroutine to skip past current block ! Assumes that the file is at a point where the block header has been read ! If it's at the start of a block call cfd_skip_block_header first current_displacement = block_header_end + block_length END SUBROUTINE cfd_skip_block SUBROUTINE cfd_skip_block_header ! Minimal subroutine used to skip block header current_displacement = block_header_end END SUBROUTINE cfd_skip_block_header SUBROUTINE cfd_skip_block_metadata ! Minimal subroutine to skip straight to the start of the real data ! 
in the current block current_displacement = block_header_end + block_md_length END SUBROUTINE cfd_skip_block_metadata END MODULE inputfunctions Lare3d/src/io/iocommon.f90000644 000765 000024 00000004102 11406627300 015616 0ustar00Tonystaff000000 000000 MODULE iocommon USE shared_data IMPLICIT NONE SAVE INTEGER, PARAMETER :: TYPE_SCRIBBLE = -1, & TYPE_ADDITIONAL = 0, & TYPE_MESH = 1, & TYPE_MESH_VARIABLE = 2, & TYPE_SNAPSHOT = 3, & TYPE_STITCHED_VECTOR = 4, & TYPE_STITCHED_MAGNITUDE = 5, & TYPE_CONSTANT = 6, & TYPE_ARB_DB = 7, & TYPE_INTEGERARRAY = 8 INTEGER, PARAMETER :: MESH_CARTESIAN = 0, MESH_PARTICLE = 1 INTEGER, PARAMETER :: PARTICLE_CARTESIAN = 0, & PARTICLE_POLAR = 1, & PARTICLE_CYLINDRICAL = 2 INTEGER, PARAMETER :: VAR_CARTESIAN = 0, VAR_PARTICLE = 1 ! Dimension_Irrelevant is used where the dimensionality isn't needed, ! as with particle variables still keep dimensionality as a common ! quantity because other than this, they really are very alike INTEGER, PARAMETER :: DIMENSION_IRRELEVANT = 0, & DIMENSION_1D = 1, & DIMENSION_2D = 2, & DIMENSION_3D = 3 INTEGER(KIND = MPI_OFFSET_KIND) :: current_displacement INTEGER :: cfd_filehandle = -1, cfd_rank, cfd_comm, nblocks INTEGER, PARAMETER :: cfd_version = 1, cfd_revision = 0 INTEGER :: max_string_len = 40, default_rank = 0 INTEGER, PARAMETER :: header_offset_this_version = 6 * 4 + 3 ! This cannot be changed without a major revision ! If you want to add more to every meshtype, tough luck ! You'll either have to tag it to every class or ! submit it for inclusion in the next major revision ! (This shouldn't ever happen, meshtype covers too many things, ! The only thing in common is that they include spatial information) INTEGER, PARAMETER :: meshtype_header_offset = 3 * 4 INTEGER, PARAMETER :: soi = 4 ! Size of integer INTEGER, PARAMETER :: soi8 = 8 ! 
Size of long (normally 8 byte integer) INTEGER :: block_header_size, header_offset INTEGER :: cfd_errcode, cfd_status(MPI_STATUS_SIZE), cfd_mode LOGICAL :: cfd_writing, cfd_reading ! Current block info INTEGER :: c_block_type INTEGER(KIND = 8) :: block_length, block_md_length INTEGER(KIND = MPI_OFFSET_KIND) :: block_header_start, block_header_end END MODULE iocommon Lare3d/src/io/iocontrol.f90000644 000765 000024 00000004302 11406627300 016010 0ustar00Tonystaff000000 000000 MODULE iocontrol USE shared_data USE iocommon USE input USE output IMPLICIT NONE CONTAINS SUBROUTINE cfd_open(filename, cfd_rank_in, cfd_comm_in, mode) CHARACTER(len = *), INTENT(IN) :: filename INTEGER, INTENT(IN) :: cfd_comm_in, cfd_rank_in, mode cfd_comm = cfd_comm_in cfd_rank = cfd_rank_in cfd_mode = mode cfd_writing = IOR(IAND(mode, MPI_MODE_RDWR), IAND(mode, MPI_MODE_WRONLY)) & .NE. 0 cfd_reading = IOR(IAND(mode, MPI_MODE_RDWR), IAND(mode, MPI_MODE_RDONLY)) & .NE. 0 IF (IAND(mode, MPI_MODE_CREATE) .NE. 0) THEN ! Creating a new file of the current version, so set the header offset ! to reflect current version header_offset = header_offset_this_version ! We are opening a file to be created, so use the destructive file ! opening command CALL cfd_open_clobber(filename) ELSE ! We're opening a file which already exists, so don't damage it CALL cfd_open_read(filename) END IF END SUBROUTINE cfd_open SUBROUTINE cfd_close ! No open file IF (cfd_filehandle == -1) RETURN ! If writing IF (cfd_writing) THEN ! Go to place where the empty value for nblocks is current_displacement = header_offset - 4 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_INTEGER, MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) & CALL MPI_FILE_WRITE(cfd_filehandle, nblocks, 1, MPI_INTEGER, & cfd_status, cfd_errcode) END IF CALL MPI_BARRIER(comm, cfd_errcode) CALL MPI_FILE_CLOSE(cfd_filehandle, cfd_errcode) ! 
Set cfd_filehandle to -1 to show that the file is closed cfd_filehandle = -1 END SUBROUTINE cfd_close SUBROUTINE cfd_set_max_string_length(maxlen) INTEGER, INTENT(IN) :: maxlen max_string_len = maxlen END SUBROUTINE cfd_set_max_string_length SUBROUTINE cfd_set_default_rank(rank_in) INTEGER, INTENT(IN) :: rank_in default_rank = rank_in END SUBROUTINE cfd_set_default_rank FUNCTION cfd_get_nblocks() INTEGER :: cfd_get_nblocks cfd_get_nblocks = nblocks END FUNCTION cfd_get_nblocks END MODULE iocontrol Lare3d/src/io/output.f90000644 000765 000024 00000027676 11406627300 015363 0ustar00Tonystaff000000 000000 MODULE output USE shared_data USE iocommon IMPLICIT NONE SAVE PRIVATE PUBLIC :: cfd_open_clobber, cfd_write_block_header, cfd_write_meshtype_header PUBLIC :: cfd_safe_write_string, cfd_write_snapshot_data, & cfd_write_stitched_vector PUBLIC :: cfd_write_stitched_magnitude, cfd_write_real_constant PUBLIC :: cfd_write_visit_expression CONTAINS SUBROUTINE cfd_open_clobber(filename) CHARACTER(len = *), INTENT(IN) :: filename ! Set the block header block_header_size = max_string_len * 2 + 4 + 2 * 8 ! Delete file and wait IF (cfd_rank == default_rank) & CALL MPI_FILE_DELETE(TRIM(filename), MPI_INFO_NULL, cfd_errcode) CALL MPI_BARRIER(cfd_comm, cfd_errcode) CALL MPI_FILE_OPEN(cfd_comm, TRIM(filename), cfd_mode, MPI_INFO_NULL, & cfd_filehandle, cfd_errcode) IF (cfd_rank == default_rank) THEN ! Write the header CALL MPI_FILE_WRITE(cfd_filehandle, "CFD", 3, MPI_CHARACTER, cfd_status, & cfd_errcode) ! This goes next so that stuff can be added to the global header ! without breaking ! 
Everything CALL MPI_FILE_WRITE(cfd_filehandle, header_offset, 1, MPI_INTEGER, & cfd_status, cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, block_header_size, 1, MPI_INTEGER, & cfd_status, cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, cfd_version, 1, MPI_INTEGER, & cfd_status, cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, cfd_revision, 1, MPI_INTEGER, & cfd_status, cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, max_string_len, 1, MPI_INTEGER, & cfd_status, cfd_errcode) ! This is where the nblocks variable will go, put a zero for now CALL MPI_FILE_WRITE(cfd_filehandle, 0, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF ! Currently no blocks written nblocks = 0 ! Current displacement is just the header current_displacement = header_offset END SUBROUTINE cfd_open_clobber SUBROUTINE cfd_safe_write_string(string) CHARACTER(LEN = *), INTENT(IN) :: string CHARACTER(LEN = max_string_len) :: output INTEGER :: len_s len_s = LEN(string) ! This subroutine expects that the record marker is in place and that ! the view is set correctly. Call it only on the node which is doing the ! writing ! You still have to advance the file pointer yourself on all nodes output(1:MIN(max_string_len, len_s)) = string(1:MIN(max_string_len, len_s)) ! If this isn't the full string length then tag in a ACHAR(0) to help ! 
With C + + string handling IF (len_s + 1 < max_string_len) output(len_s + 1:max_string_len) = ACHAR(0) CALL MPI_FILE_WRITE(cfd_filehandle, output, max_string_len, MPI_CHARACTER, & cfd_status, cfd_errcode) END SUBROUTINE cfd_safe_write_string SUBROUTINE cfd_write_block_header(blockname, blockclass, blocktype, & blocklength, blockmetadatalength, rank_write) CHARACTER(len = *), INTENT(IN) :: blockname, blockclass INTEGER, INTENT(IN) :: blocktype, rank_write INTEGER(KIND = 8), INTENT(IN) :: blocklength, blockmetadatalength INTEGER :: len_bn, len_bc len_bn = LEN(blockname) len_bc = LEN(blockclass) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL cfd_safe_write_string(blockname) CALL cfd_safe_write_string(blockclass) END IF current_displacement = current_displacement + 2 * max_string_len ! Write the block type CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) & CALL MPI_FILE_WRITE(cfd_filehandle, blocktype, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + 4 ! Write the block skip and metadata skip data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER8, & MPI_INTEGER8, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, blockmetadatalength, 1, & MPI_INTEGER8, cfd_status, cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, blocklength, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 2 * 8 nblocks = nblocks + 1 END SUBROUTINE cfd_write_block_header SUBROUTINE cfd_write_meshtype_header(type, dim, sof, rank_write) ! MeshTypes (Meshes, fluid variables, multimat blocks etc) ! All have a common header, this is what writes that (although the content ! 
Of type will depend on what meshtype you're using) INTEGER, INTENT(IN) :: type, dim, rank_write, sof CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, type, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, dim, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, sof, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + meshtype_header_offset END SUBROUTINE cfd_write_meshtype_header SUBROUTINE cfd_write_snapshot_data(time, CYCLE, rank_write) INTEGER, INTENT(IN) :: rank_write, CYCLE INTEGER(8) :: mdlength REAL(8), INTENT(IN) :: time mdlength = soi + 8 CALL cfd_write_block_header("Snapshot", "Snapshot", TYPE_SNAPSHOT, & mdlength, mdlength, rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) & CALL MPI_FILE_WRITE(cfd_filehandle, CYCLE, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_DOUBLE_PRECISION, MPI_DOUBLE_PRECISION, "native", MPI_INFO_NULL, & cfd_errcode) IF (cfd_rank == rank_write) & CALL MPI_FILE_WRITE(cfd_filehandle, time, 1, MPI_DOUBLE_PRECISION, & cfd_status, cfd_errcode) current_displacement = current_displacement + 8 END SUBROUTINE cfd_write_snapshot_data SUBROUTINE cfd_write_stitched_vector(vector_name, vector_class, mesh_name, & mesh_class, name, class, rank_write) CHARACTER(len = *), DIMENSION(:), INTENT(IN) :: name, class CHARACTER(len = *), INTENT(IN) :: vector_name, vector_class, mesh_name, & mesh_class INTEGER, INTENT(IN) :: rank_write INTEGER(8) :: n_dims, mdlength, blocklength INTEGER :: i_loop n_dims = SIZE(name) mdlength = 2 * max_string_len + soi blocklength = mdlength + n_dims * 
2 * max_string_len CALL cfd_write_block_header(vector_name, vector_class, & TYPE_STITCHED_VECTOR, blocklength, mdlength, rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL cfd_safe_write_string(mesh_name) CALL cfd_safe_write_string(mesh_class) END IF current_displacement = current_displacement + 2 * max_string_len CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) & CALL MPI_FILE_WRITE(cfd_filehandle, n_dims, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN DO i_loop = 1, n_dims CALL cfd_safe_write_string(name(i_loop)) CALL cfd_safe_write_string(class(i_loop)) END DO END IF current_displacement = current_displacement + 2 * n_dims* max_string_len END SUBROUTINE cfd_write_stitched_vector SUBROUTINE cfd_write_stitched_magnitude(magn_name, magn_class, mesh_name, & mesh_class, name, class, rank_write) CHARACTER(len = *), DIMENSION(:), INTENT(IN) :: name, class CHARACTER(len = *), INTENT(IN) :: magn_name, magn_class, mesh_name, & mesh_class INTEGER, INTENT(IN) :: rank_write INTEGER(8) :: n_dims, mdlength, blocklength INTEGER :: i_loop n_dims = SIZE(name) mdlength = 2 * max_string_len + soi blocklength = mdlength + n_dims * 2 * max_string_len CALL cfd_write_block_header(magn_name, magn_class, & TYPE_STITCHED_MAGNITUDE, blocklength, mdlength, rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL cfd_safe_write_string(mesh_name) CALL cfd_safe_write_string(mesh_class) END IF current_displacement = 
current_displacement + 2 * max_string_len CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) & CALL MPI_FILE_WRITE(cfd_filehandle, n_dims, 1, MPI_INTEGER, & cfd_status, cfd_errcode) current_displacement = current_displacement + soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN DO i_loop = 1, n_dims CALL cfd_safe_write_string(name(i_loop)) CALL cfd_safe_write_string(class(i_loop)) END DO END IF current_displacement = current_displacement + 2 * n_dims* max_string_len END SUBROUTINE cfd_write_stitched_magnitude SUBROUTINE cfd_write_real_constant(name, class, value, rank_write) CHARACTER(len = *), INTENT(IN) :: name, class REAL(num), INTENT(IN) :: value INTEGER, INTENT(IN) :: rank_write INTEGER(8) :: mdlength mdlength = num CALL cfd_write_block_header(name, class, TYPE_CONSTANT, mdlength, & mdlength, rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, value, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + num END SUBROUTINE cfd_write_real_constant SUBROUTINE cfd_write_1d_integer_array(name, class, values, rank_write) CHARACTER(len = *), INTENT(IN) :: name, class INTEGER, DIMENSION(:), INTENT(IN) :: values INTEGER, INTENT(IN) :: rank_write INTEGER(8) :: mdlength mdlength = 2 * soi CALL cfd_write_block_header(name, class, TYPE_INTEGERARRAY, mdlength, & mdlength, rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN ! 1D CALL MPI_FILE_WRITE(cfd_filehandle, 1, 1, MPI_INTEGER, cfd_status, & cfd_errcode) ! 
Size of array CALL MPI_FILE_WRITE(cfd_filehandle, 1, SIZE(values), MPI_INTEGER, & cfd_status, cfd_errcode) ! Actual Array CALL MPI_FILE_WRITE(cfd_filehandle, values, SIZE(values), MPI_INTEGER, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + mdlength END SUBROUTINE cfd_write_1d_integer_array SUBROUTINE cfd_write_visit_expression(expression_name, expression_class, & expression) CHARACTER(LEN = *), DIMENSION(:), INTENT(IN) :: expression_name, & expression_class, expression PRINT *, LEN(expression(1)), LEN(expression(2)) END SUBROUTINE cfd_write_visit_expression END MODULE output Lare3d/src/io/output_arb.f90000644 000765 000024 00000003153 11406627300 016167 0ustar00Tonystaff000000 000000 MODULE output_arb USE shared_data USE iocommon USE output IMPLICIT NONE CONTAINS ! This subroutine is used to wrap a block containing program specific data ! Which there is no general way of allowing other programs to read ! It permits the use of a single string to idenitify the program that wrote it SUBROUTINE cfd_write_arb_block(name, class, generator_desc, & data_length, writer) CHARACTER(LEN = *), INTENT(IN) :: name, class, generator_desc INTEGER(8), INTENT(IN) :: data_length INTERFACE SUBROUTINE writer(filehandle, current_displacement) USE shared_data INTEGER, INTENT(IN) :: filehandle INTEGER(KIND = MPI_OFFSET_KIND), INTENT(IN) :: current_displacement END SUBROUTINE writer END INTERFACE INTEGER(KIND = 8) :: mdlength, blocklength INTEGER(KIND = MPI_OFFSET_KIND) :: initial_displacement ! Outputs general block header as described in cfd_write_block_header ! 
and then a single string mdlength = 1 * max_string_len blocklength = mdlength + data_length CALL cfd_write_block_header(name, class, TYPE_ARB_DB, blocklength, & mdlength, default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) CALL cfd_safe_write_string(generator_desc) current_displacement = current_displacement + max_string_len CALL writer(cfd_filehandle, current_displacement) current_displacement = current_displacement + data_length END SUBROUTINE cfd_write_arb_block END MODULE output_arb Lare3d/src/io/output_cartesian.f90000644 000765 000024 00000072455 11406627300 017407 0ustar00Tonystaff000000 000000 MODULE output_cartesian USE shared_data USE iocommon USE output IMPLICIT NONE CONTAINS !-------------------------------------------------------------------------- ! Code to write a 1D Cartesian grid in serial from the node with rank ! {rank_write} ! Serial operation, so no need to specify nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_1d_cartesian_grid(name, class, x, rank_write) REAL(num), DIMENSION(:), INTENT(IN) :: x CHARACTER(len = *), INTENT(IN) :: name, class INTEGER, INTENT(IN) :: rank_write INTEGER(4) :: nx INTEGER(8) :: blocklen, mdlen nx = SIZE(x) ! Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to Cartesian Grid ! 1 ) nx INTEGER(4) ! 2 ) xmin REAL(num) ! 3 ) xmax REAL(num) ! 1 ints, 2 reals + meshtype Header mdlen = meshtype_header_offset + 1 * soi + 2 * num blocklen = mdlen + nx * num ! 
Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH, blocklen, mdlen, & rank_write) CALL cfd_write_meshtype_header(MESH_CARTESIAN, DIMENSION_1D, num, & rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 1 * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, MINVAL(x), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MAXVAL(x), 1, mpireal, cfd_status, & cfd_errcode) ! Now write the real arrays CALL MPI_FILE_WRITE(cfd_filehandle, x, nx, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 2 * num + nx * num END SUBROUTINE cfd_write_1d_cartesian_grid !-------------------------------------------------------------------------- ! Code to write a 2D Cartesian grid in serial from the node with rank ! {rank_write} ! Serial operation, so no need to specify nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_2d_cartesian_grid(name, class, x, y, rank_write) REAL(num), DIMENSION(:), INTENT(IN) :: x, y CHARACTER(len = *), INTENT(IN) :: name, class INTEGER, INTENT(IN) :: rank_write INTEGER(4) :: nx, ny INTEGER(8) :: blocklen, mdlen nx = SIZE(x) ny = SIZE(y) ! Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to Cartesian Grid ! 1 ) nx INTEGER(4) ! 2 ) ny INTEGER(4) ! 3 ) xmin REAL(num) ! 4 ) xmax REAL(num) ! 5 ) ymin REAL(num) ! 6 ) ymax REAL(num) ! 2 ints, 6 reals + meshtype Header mdlen = meshtype_header_offset + 2 * soi + 4 * num blocklen = mdlen + (nx+ny) * num ! 
Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH, blocklen, mdlen, & rank_write) CALL cfd_write_meshtype_header(MESH_CARTESIAN, DIMENSION_2D, num, & rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, ny, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 2 * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, MINVAL(x), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MAXVAL(x), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MINVAL(y), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MAXVAL(y), 1, mpireal, cfd_status, & cfd_errcode) ! Now write the real arrays CALL MPI_FILE_WRITE(cfd_filehandle, x, nx, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, y, ny, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 4 * num + (nx+ny) * num END SUBROUTINE cfd_write_2d_cartesian_grid !-------------------------------------------------------------------------- ! Code to write a 3D Cartesian grid in serial from the node with rank ! {rank_write} ! Serial operation, so no need to specify nx, ny, nz !-------------------------------------------------------------------------- SUBROUTINE cfd_write_3d_cartesian_grid(name, class, x, y, z, rank_write) REAL(num), DIMENSION(:), INTENT(IN) :: x, y, z CHARACTER(len = *), INTENT(IN) :: name, class INTEGER, INTENT(IN) :: rank_write INTEGER(4) :: nx, ny, nz INTEGER(8) :: blocklen, mdlen nx = SIZE(x) ny = SIZE(y) nz = SIZE(z) ! 
Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to Cartesian Grid ! 2 ) nx INTEGER(4) ! 3 ) ny INTEGER(4) ! 4 ) nz INTEGER(4) ! 5 ) xmin REAL(num) ! 6 ) xmax REAL(num) ! 7 ) ymin REAL(num) ! 8 ) ymax REAL(num) ! 9 ) zmin REAL(num) ! 10) zmax REAL(num) ! 3 ints, 6 reals + meshtype Header mdlen = meshtype_header_offset + 3 * soi + 6 * num blocklen = mdlen + (nx+ny+nz) * num ! Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH, blocklen, mdlen, & rank_write) CALL cfd_write_meshtype_header(MESH_CARTESIAN, DIMENSION_3D, num, & rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, ny, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, nz, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 3 * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, MINVAL(x), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MAXVAL(x), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MINVAL(y), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MAXVAL(y), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MINVAL(z), 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, MAXVAL(z), 1, mpireal, cfd_status, & cfd_errcode) ! 
Now write the real arrays CALL MPI_FILE_WRITE(cfd_filehandle, x, nx, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, y, ny, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, z, nz, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 6 * num + (nx+ny+nz) * num END SUBROUTINE cfd_write_3d_cartesian_grid !-------------------------------------------------------------------------- ! Code to write a 3D Cartesian variable in parallel using the mpitype ! {distribution} for distribution of data ! It's up to the coder to design the distribution ! Parallel operation, so need global nx, ny, nz !-------------------------------------------------------------------------- SUBROUTINE cfd_write_3d_cartesian_variable_parallel(name, class, dims, & stagger, meshname, meshclass, variable, distribution) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass REAL(num), DIMENSION(:, :, :), INTENT(IN) :: variable INTEGER, INTENT(IN) :: distribution INTEGER, INTENT(IN), DIMENSION(3) :: dims REAL(num), INTENT(IN), DIMENSION(3) :: stagger REAL(num) :: mn, mx, mn_global, mx_global INTEGER(8) :: blocklen, mdlen, len_var INTEGER :: nx, ny, nz ! * ) VariableType (INTEGER(4)) All variable blocks contain this ! These are specific to a cartesian variable !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! 1 ) nx INTEGER(4) ! 2 ) ny INTEGER(4) ! 3 ) nz INTEGER(4) ! 4 ) stx REAL(num) ! 5 ) sty REAL(num) ! 6 ) stz REAL(num) ! 7 ) dmin REAL(num) ! 8 ) dmax REAL(num) ! 9 ) Mesh CHARACTER(max_string_len) ! 10) class CHARACTER(max_string_len) len_var = SIZE(variable) nx = dims(1) ny = dims(2) nz = dims(3) ! 3 INTs 5 REALs 2STRINGs mdlen = meshtype_header_offset + 3 * soi + 5 * num + 2 * max_string_len blocklen = mdlen + num * nx * ny * nz ! 
Write the common stuff CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, default_rank) CALL cfd_write_meshtype_header(VAR_CARTESIAN, DIMENSION_3D, num, & default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, ny, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, nz, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 3 * soi ! Set the file view CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) ! Determine data ranges and write out mn = MINVAL(variable) mx = MAXVAL(variable) CALL MPI_ALLREDUCE(mn, mn_global, 1, mpireal, MPI_MIN, cfd_comm, & cfd_errcode) CALL MPI_ALLREDUCE(mx, mx_global, 1, mpireal, MPI_MAX, cfd_comm, & cfd_errcode) IF (cfd_rank == default_rank) THEN ! Write out grid stagger CALL MPI_FILE_WRITE(cfd_filehandle, stagger, 3, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mn_global, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx_global, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 5 * num ! Write the mesh name and class CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len ! 
Write the actual Data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & distribution, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_WRITE_ALL(cfd_filehandle, variable, len_var, mpireal, & cfd_status, cfd_errcode) current_displacement = current_displacement + num * nx * ny * nz END SUBROUTINE cfd_write_3d_cartesian_variable_parallel !-------------------------------------------------------------------------- ! Code to write a 2D Cartesian variable in parallel using the mpitype ! {distribution} for distribution of data ! It's up to the coder to design the distribution ! Parallel operation, so need global nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_2d_cartesian_variable_parallel(name, class, dims, & stagger, meshname, meshclass, variable, distribution) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass REAL(num), DIMENSION(:, :), INTENT(IN) :: variable INTEGER, INTENT(IN) :: distribution INTEGER, INTENT(IN), DIMENSION(2) :: dims REAL(num), INTENT(IN), DIMENSION(2) :: stagger INTEGER :: nx, ny REAL(num) :: mn, mx, mn_global, mx_global INTEGER(8) :: blocklen, mdlen, len_var ! * ) VariableType (INTEGER(4)) All variable blocks contain this ! These are specific to a cartesian variable !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! 1 ) nx INTEGER(4) ! 2 ) ny INTEGER(4) ! 3 ) stx REAL(num) ! 4 ) sty REAL(num) ! 5 ) dmin REAL(num) ! 6 ) dmax REAL(num) ! 7 ) Mesh CHARACTER(max_string_len) ! 8 ) class CHARACTER(max_string_len) len_var = SIZE(variable) nx = dims(1) ny = dims(2) ! 
3 INTs 2 REALs mdlen = meshtype_header_offset + 2 * soi + 4 * num + 2 * max_string_len blocklen = mdlen + num * nx * ny CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, default_rank) CALL cfd_write_meshtype_header(VAR_CARTESIAN, DIMENSION_2D, num, & default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, ny, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 2 * soi ! Determine data ranges and write out mn = MINVAL(variable) mx = MAXVAL(variable) CALL MPI_ALLREDUCE(mn, mn_global, 1, mpireal, MPI_MIN, cfd_comm, & cfd_errcode) CALL MPI_ALLREDUCE(mx, mx_global, 1, mpireal, MPI_MAX, cfd_comm, & cfd_errcode) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, stagger, 2, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mn_global, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx_global, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 4 * num ! Write the mesh name and class CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len ! 
Write the actual Data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & distribution, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_WRITE_ALL(cfd_filehandle, variable, len_var, mpireal, & cfd_status, cfd_errcode) current_displacement = current_displacement + num * nx * ny END SUBROUTINE cfd_write_2d_cartesian_variable_parallel !-------------------------------------------------------------------------- ! Code to write a 1D Cartesian variable in parallel using the mpitype ! {distribution} for distribution of data ! It's up to the coder to design the distribution ! Parallel operation, so need global nx !-------------------------------------------------------------------------- SUBROUTINE cfd_write_1d_cartesian_variable_parallel(name, class, dims, & stagger, meshname, meshclass, variable, distribution) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass REAL(num), DIMENSION(:), INTENT(IN) :: variable INTEGER, INTENT(IN) :: distribution INTEGER, INTENT(IN) :: dims REAL(num), INTENT(IN) :: stagger INTEGER :: nx REAL(num) :: mn, mx, mn_global, mx_global INTEGER(8) :: blocklen, mdlen, len_var ! * ) VariableType (INTEGER(4)) All variable blocks contain this ! These are specific to a cartesian variable !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! 1 ) nx INTEGER(4) ! 2 ) stx REAL(num) ! 3 ) dmin REAL(num) ! 4 ) dmax REAL(num) ! 5 ) Mesh CHARACTER(max_string_len) ! 6 ) class CHARACTER(max_string_len) len_var = SIZE(variable) nx = dims ! 
1 INTs 3 REALs 2 Strings mdlen = meshtype_header_offset + 1 * soi + 3 * num + 2 * max_string_len blocklen = mdlen + num * nx CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, default_rank) CALL cfd_write_meshtype_header(VAR_CARTESIAN, DIMENSION_1D, num, & default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 1 * soi ! Determine data ranges and write out mn = MINVAL(variable) mx = MAXVAL(variable) CALL MPI_ALLREDUCE(mn, mn_global, 1, mpireal, MPI_MIN, cfd_comm, & cfd_errcode) CALL MPI_ALLREDUCE(mx, mx_global, 1, mpireal, MPI_MAX, cfd_comm, & cfd_errcode) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, stagger, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mn_global, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx_global, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 3 * num ! Write the mesh name and class CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len ! 
Write the actual Data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & distribution, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_FILE_WRITE_ALL(cfd_filehandle, variable, len_var, mpireal, & cfd_status, cfd_errcode) current_displacement = current_displacement + num * nx END SUBROUTINE cfd_write_1d_cartesian_variable_parallel !-------------------------------------------------------------------------- ! Code to write a 1D Cartesian variable in serial using node with rank ! {rank_write} for writing ! Serial operation, so no need for nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_1d_cartesian_variable(name, class, stagger, meshname, & meshclass, variable, rank_write) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass REAL(num), DIMENSION(:), INTENT(IN) :: variable INTEGER, INTENT(IN) :: rank_write REAL(num), INTENT(IN), DIMENSION(1) :: stagger INTEGER :: nx REAL(num) :: mn, mx, mn_global, mx_global INTEGER, DIMENSION(1) :: dims INTEGER(8) :: blocklen, mdlen, len_var ! * ) VariableType (INTEGER(4)) All variable blocks contain this ! These are specific to a cartesian variable !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! 1 ) nx INTEGER(4) ! 2 ) stx REAL(num) ! 3 ) dmin REAL(num) ! 4 ) dmax REAL(num) ! 5 ) Mesh CHARACTER(max_string_len) ! 6 ) class CHARACTER(max_string_len) len_var = SIZE(variable) dims = SHAPE(variable) nx = dims(1) ! 1 INTs 3 REALs mdlen = meshtype_header_offset + 1 * soi + 3 * num + 2 * max_string_len blocklen = mdlen + num * nx CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, rank_write) CALL cfd_write_meshtype_header(VAR_CARTESIAN, DIMENSION_1D, num, & rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) ! 
This is the serial version remember IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 1 * soi ! Determine data ranges and write out mn = MINVAL(variable) mx = MAXVAL(variable) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, stagger, 2, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mn_global, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx_global, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 3 * num ! Write the mesh name and class CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len ! Write the actual Data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) CALL MPI_FILE_WRITE(cfd_filehandle, variable, & len_var, mpireal, cfd_status, cfd_errcode) current_displacement = current_displacement + num * nx END SUBROUTINE cfd_write_1d_cartesian_variable !-------------------------------------------------------------------------- ! Code to write a 2D Cartesian variable in serial using node with rank ! {rank_write} for writing ! 
Serial operation, so no need for nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_2d_cartesian_variable(name, class, stagger, meshname, & meshclass, variable, rank_write) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass REAL(num), DIMENSION(:, :), INTENT(IN) :: variable INTEGER, INTENT(IN) :: rank_write REAL(num), INTENT(IN), DIMENSION(2) :: stagger INTEGER :: nx, ny REAL(num) :: mn, mx, mn_global, mx_global INTEGER, DIMENSION(2) :: dims INTEGER(8) :: blocklen, mdlen, len_var ! * ) VariableType (INTEGER(4)) All variable blocks contain this ! These are specific to a cartesian variable !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! 1 ) nx INTEGER(4) ! 2 ) ny INTEGER(4) ! 3 ) stx REAL(num) ! 4 ) sty REAL(num) ! 5 ) dmin REAL(num) ! 6 ) dmax REAL(num) ! 7 ) Mesh CHARACTER(max_string_len) ! 8 ) class CHARACTER(max_string_len) len_var = SIZE(variable) dims = SHAPE(variable) nx = dims(1) ny = dims(2) ! 2 INTs 4 REALs mdlen = meshtype_header_offset + 2 * soi + 4 * num + 2 * max_string_len blocklen = mdlen + num * nx * ny CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, rank_write) CALL cfd_write_meshtype_header(VAR_CARTESIAN, DIMENSION_2D, num, & rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) ! This is the serial version remember IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, ny, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 2 * soi ! 
Determine data ranges and write out mn = MINVAL(variable) mx = MAXVAL(variable) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, stagger, 2, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mn_global, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx_global, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 4 * num ! Write the mesh name and class CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len ! Write the actual Data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) CALL MPI_FILE_WRITE(cfd_filehandle, variable, & len_var, mpireal, cfd_status, cfd_errcode) current_displacement = current_displacement + num * nx * ny END SUBROUTINE cfd_write_2d_cartesian_variable !-------------------------------------------------------------------------- ! Code to write a 3D Cartesian variable in serial using node with rank ! {rank_write} for writing ! 
Serial operation, so no need for nx, ny, nz !-------------------------------------------------------------------------- SUBROUTINE cfd_write_3d_cartesian_variable(name, class, stagger, meshname, & meshclass, variable, rank_write) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass REAL(num), DIMENSION(:, :, :), INTENT(IN) :: variable INTEGER, INTENT(IN) :: rank_write REAL(num), INTENT(IN), DIMENSION(3) :: stagger INTEGER :: nx, ny, nz INTEGER, DIMENSION(3) :: dims REAL(num) :: mn, mx, mn_global, mx_global INTEGER(8) :: blocklen, mdlen, len_var ! * ) VariableType (INTEGER(4)) All variable blocks contain this ! These are specific to a cartesian variable !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! 1 ) nx INTEGER(4) ! 2 ) ny INTEGER(4) ! 3 ) nz INTEGER(4) ! 4 ) stx REAL(num) ! 5 ) sty REAL(num) ! 6 ) stz REAL(num) ! 7 ) dmin REAL(num) ! 8 ) dmax REAL(num) ! 9 ) Mesh CHARACTER(max_string_len) ! 10) class CHARACTER(max_string_len) len_var = SIZE(variable) dims = SHAPE(variable) nx = dims(1) ny = dims(2) nz = dims(3) ! 3 INTs 5 REALs mdlen = meshtype_header_offset + 3 * soi + 5 * num + 2 * max_string_len blocklen = mdlen + num * nx * ny * nz CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, rank_write) CALL cfd_write_meshtype_header(VAR_CARTESIAN, DIMENSION_2D, num, & rank_write) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) ! This is the serial version remember IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, nx, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, ny, 1, MPI_INTEGER, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, nz, 1, MPI_INTEGER, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 3 * soi ! 
Determine data ranges and write out mn = MINVAL(variable) mx = MAXVAL(variable) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL MPI_FILE_WRITE(cfd_filehandle, stagger, 3, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mn_global, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx_global, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 5 * num ! Write the mesh name and class CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len ! Write the actual Data CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == rank_write) CALL MPI_FILE_WRITE(cfd_filehandle, variable, & len_var, mpireal, cfd_status, cfd_errcode) current_displacement = current_displacement + num * nx * ny * nz END SUBROUTINE cfd_write_3d_cartesian_variable END MODULE output_cartesian Lare3d/src/io/output_particle.f90000644 000765 000024 00000040423 11406627300 017227 0ustar00Tonystaff000000 000000 MODULE output_particle USE shared_data USE iocommon USE output IMPLICIT NONE CONTAINS !-------------------------------------------------------------------------- ! Code to write a 2D Cartesian grid in serial from the node with rank ! {rank_write} ! 
Serial operation, so no need to specify nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_nd_particle_grid_all(name, class, particles, & npart_global, particle_coord_type, particle_type) REAL(num), DIMENSION(:, :), INTENT(IN) :: particles CHARACTER(len = *), INTENT(IN) :: name, class INTEGER(8), INTENT(IN) :: npart_global INTEGER(4), INTENT(IN) :: particle_coord_type INTEGER, INTENT(IN) :: particle_type INTEGER(8) :: npart_local INTEGER(8) :: blocklen, mdlen INTEGER(4) :: ndim, i, disp0 INTEGER(4) :: sizes(2) REAL(num) :: mn, mx sizes = SHAPE(particles) npart_local = sizes(2) ndim = sizes(1) ! Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to particle mesh ! 1 ) ct INTEGER(4) ! 2 ) npart INTEGER(8) ! 3 ) d1min REAL(num) ! 4 ) d1max REAL(num) ! 5 ) d2min REAL(num) ! 6 ) d2max REAL(num) ! . ! . ! . ! n ) dnmin REAL(num) ! n + 1) dnmax REAL(num) ! 1 INT, 1 INT8, 2REAL per Dim mdlen = meshtype_header_offset + 1 * soi + 1 * soi8 + ndim * 2 * num blocklen = mdlen + num * ndim * npart_global ! 
Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH, blocklen, mdlen, & default_rank) disp0 = current_displacement CALL cfd_write_meshtype_header(MESH_PARTICLE, ndim, num, default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, particle_coord_type, 1, MPI_INTEGER, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 1 * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER8, & MPI_INTEGER8, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, npart_global, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 1 * soi8 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) DO i = 1, ndim CALL MPI_ALLREDUCE(MINVAL(particles(:, i)), mn, 1, mpireal, MPI_MIN, & cfd_comm, cfd_errcode) CALL MPI_ALLREDUCE(MAXVAL(particles(:, i)), mx, 1, mpireal, MPI_MAX, & cfd_comm, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, mn, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 2 * num END DO CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & particle_type, "native", MPI_INFO_NULL, cfd_errcode) ! Write the real data CALL MPI_FILE_WRITE_ALL(cfd_filehandle, particles, npart_local * ndim, & mpireal, cfd_status, cfd_errcode) current_displacement = current_displacement + ndim * npart_global * num END SUBROUTINE cfd_write_nd_particle_grid_all !-------------------------------------------------------------------------- ! Code to write a 2D Cartesian grid in serial from the node with rank ! {rank_write} ! 
Serial operation, so no need to specify nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_nd_particle_grid_with_iterator_all(name, class, & iterator, ndims, npart_local, npart_global, npart_per_iteration, & particle_coord_type, particle_type) CHARACTER(len = *), INTENT(IN) :: name, class INTEGER(8), INTENT(IN) :: npart_global INTEGER(8), INTENT(IN) :: npart_local INTEGER(8), INTENT(IN) :: npart_per_iteration INTEGER(4), INTENT(IN) :: ndims INTEGER(4), INTENT(IN) :: particle_coord_type INTEGER, INTENT(IN) :: particle_type REAL(num), ALLOCATABLE, DIMENSION(:) :: data INTERFACE SUBROUTINE iterator(data, npart_it, direction, start) USE shared_data REAL(num), DIMENSION(:), INTENT(INOUT) :: data INTEGER, INTENT(IN) :: direction INTEGER(8), INTENT(INOUT) :: npart_it LOGICAL, INTENT(IN) :: start END SUBROUTINE iterator END INTERFACE INTEGER(8) :: blocklen, mdlen, npart_this_cycle, min_npart_this_cycle, & npart_sent INTEGER(4) :: idim INTEGER(4) :: sizes(2) INTEGER(MPI_OFFSET_KIND) :: offset_for_min_max REAL(num) :: mn, mx REAL(num), ALLOCATABLE, DIMENSION(:, :) :: min_max LOGICAL :: start ! Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to particle mesh ! 1 ) ct INTEGER(4) ! 2 ) npart INTEGER(8) ! 3 ) d1min REAL(num) ! 4 ) d1max REAL(num) ! 5 ) d2min REAL(num) ! 6 ) d2max REAL(num) ! . ! . ! . ! n ) dnmin REAL(num) ! n + 1) dnmax REAL(num) ! 1 INT, 1 INT8, 2REAL per Dim mdlen = meshtype_header_offset + 1 * soi + 1 * soi8 + ndims * 2 * num blocklen = mdlen + num * ndims * npart_global ALLOCATE(min_max(1:ndims, 1:2)) min_max = 0.0_num ! 
Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH, blocklen, mdlen, & default_rank) CALL cfd_write_meshtype_header(MESH_PARTICLE, ndims, num, default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER, & MPI_INTEGER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, particle_coord_type, 1, MPI_INTEGER, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 1 * soi CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER8, & MPI_INTEGER8, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, npart_global, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 1 * soi8 ! This is to skip past the location for the min / max values(Just write ! zeros). They will be filled in later offset_for_min_max = current_displacement CALL MPI_FILE_SET_VIEW(cfd_filehandle, offset_for_min_max, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, min_max, ndims * 2, mpireal, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 2 * ndims * num ! Write the real data start = .TRUE. ALLOCATE(data(1:npart_per_iteration)) npart_sent = 0 DO idim = 1, ndims CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & particle_type, "native", MPI_INFO_NULL, cfd_errcode) npart_this_cycle = npart_per_iteration start = .TRUE. DO CALL iterator(data, npart_this_cycle, idim, start) IF (npart_this_cycle <= 0) EXIT IF (start) THEN min_max(idim, 1) = MINVAL(data(1:npart_this_cycle)) min_max(idim, 2) = MAXVAL(data(1:npart_this_cycle)) ELSE min_max(idim, 1) = MIN(min_max(idim, 1), & MINVAL(data(1:npart_this_cycle))) min_max(idim, 2) = MAX(min_max(idim, 2), & MAXVAL(data(1:npart_this_cycle))) END IF start = .FALSE. 
npart_sent = npart_sent + npart_this_cycle CALL MPI_FILE_WRITE(cfd_filehandle, data, npart_this_cycle, mpireal, & cfd_status, cfd_errcode) END DO current_displacement = current_displacement + npart_global * num END DO DEALLOCATE(data) CALL MPI_FILE_SET_VIEW(cfd_filehandle, offset_for_min_max, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) DO idim = 1, ndims CALL MPI_ALLREDUCE(min_max(idim, 1), mn, 1, mpireal, MPI_MIN, cfd_comm, & cfd_errcode) CALL MPI_ALLREDUCE(min_max(idim, 2), mx, 1, mpireal, MPI_MAX, cfd_comm, & cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, mn, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx, 1, mpireal, cfd_status, & cfd_errcode) END IF END DO DEALLOCATE(min_max) CALL MPI_BARRIER(comm, errcode) END SUBROUTINE cfd_write_nd_particle_grid_with_iterator_all !-------------------------------------------------------------------------- ! Code to write a 2D Cartesian grid in serial from the node with rank ! {rank_write} ! Serial operation, so no need to specify nx, ny !-------------------------------------------------------------------------- SUBROUTINE cfd_write_nd_particle_variable_all(name, class, particles, & npart_global, meshname, meshclass, particle_type) REAL(num), DIMENSION(:), INTENT(IN) :: particles CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass INTEGER, INTENT(IN) :: particle_type INTEGER(8), INTENT(IN) :: npart_global INTEGER(8) :: npart_local INTEGER(8) :: blocklen, mdlen INTEGER(4) :: i INTEGER(4) :: sizes(2) REAL(num) :: mn, mx npart_local = SIZE(particles) ! Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to particle variable ! 1 ) npart INTEGER(8) ! 2 ) vmin REAL(num) ! 3 ) vmax REAL(num) ! 4 ) mesh CHARACTER ! 5 ) mclass CHARACTER mdlen = meshtype_header_offset + 1 * soi8 + 2 * num + 2 * max_string_len blocklen = mdlen + num * npart_global ! 
Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, default_rank) CALL cfd_write_meshtype_header(VAR_PARTICLE, DIMENSION_IRRELEVANT, num, & default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER8, & MPI_INTEGER8, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, npart_global, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 1 * soi8 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) CALL MPI_ALLREDUCE(MINVAL(particles), mn, 1, mpireal, MPI_MIN, cfd_comm, & cfd_errcode) CALL MPI_ALLREDUCE(MAXVAL(particles), mx, 1, mpireal, MPI_MAX, cfd_comm, & cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, mn, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx, 1, mpireal, cfd_status, & cfd_errcode) END IF current_displacement = current_displacement + 2 * num CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & particle_type, "native", MPI_INFO_NULL, cfd_errcode) ! 
Write the real data CALL MPI_FILE_WRITE_ALL(cfd_filehandle, particles, npart_local, mpireal, & cfd_status, cfd_errcode) current_displacement = current_displacement + npart_global * num END SUBROUTINE cfd_write_nd_particle_variable_all SUBROUTINE cfd_write_nd_particle_variable_with_iterator_all(name, class, & iterator, npart_global, npart_per_iteration, meshname, meshclass, & particle_type) CHARACTER(len = *), INTENT(IN) :: name, class, meshname, meshclass INTEGER, INTENT(IN) :: particle_type INTEGER(8), INTENT(IN) :: npart_global, npart_per_iteration INTEGER(8) :: npart_this_cycle, min_npart_this_cycle REAL(num), ALLOCATABLE, DIMENSION(:) :: data INTERFACE SUBROUTINE iterator(data, npart_it, start) USE shared_data REAL(num), DIMENSION(:), INTENT(INOUT) :: data INTEGER(8), INTENT(INOUT) :: npart_it LOGICAL, INTENT(IN) :: start END SUBROUTINE iterator END INTERFACE INTEGER(8) :: npart_local INTEGER(8) :: blocklen, mdlen INTEGER(4) :: i INTEGER(4) :: sizes(2) REAL(num) :: mn, mx, mn_g, mx_g INTEGER(MPI_OFFSET_KIND) :: offset_for_min_max LOGICAL :: start ! Metadata is !* ) meshtype (INTEGER(4)) All mesh blocks contain this !* ) nd INTEGER(4) !* ) sof INTEGER(4) ! Specific to particle variable ! 1 ) npart INTEGER(8) ! 2 ) vmin REAL(num) ! 3 ) vmax REAL(num) ! 4 ) mesh CHARACTER ! 5 ) mclass CHARACTER mdlen = meshtype_header_offset + 1 * soi8 + 2 * num + 2 * max_string_len blocklen = mdlen + num * npart_global ! 
Now written header, write metadata CALL cfd_write_block_header(name, class, TYPE_MESH_VARIABLE, blocklen, & mdlen, default_rank) CALL cfd_write_meshtype_header(VAR_PARTICLE, DIMENSION_IRRELEVANT, num, & default_rank) CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, MPI_INTEGER8, & MPI_INTEGER8, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, npart_global, 1, MPI_INTEGER8, & cfd_status, cfd_errcode) END IF current_displacement = current_displacement + 1 * soi8 CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, 0.0_num, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, 0.0_num, 1, mpireal, cfd_status, & cfd_errcode) END IF offset_for_min_max = current_displacement current_displacement = current_displacement + 2 * num CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, & MPI_CHARACTER, MPI_CHARACTER, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL cfd_safe_write_string(meshname) CALL cfd_safe_write_string(meshclass) END IF current_displacement = current_displacement + 2 * max_string_len CALL MPI_FILE_SET_VIEW(cfd_filehandle, current_displacement, mpireal, & particle_type, "native", MPI_INFO_NULL, cfd_errcode) start = .TRUE. npart_this_cycle = npart_per_iteration ALLOCATE(data(1:npart_per_iteration)) DO data = 27.224_num CALL iterator(data, npart_this_cycle, start) IF (npart_this_cycle <= 0) EXIT IF (start) THEN mn = MINVAL(data(1:npart_this_cycle)) mx = MAXVAL(data(1:npart_this_cycle)) ELSE mn = MIN(mn, MINVAL(data(1:npart_this_cycle))) mx = MAX(mx, MAXVAL(data(1:npart_this_cycle))) END IF start = .FALSE. 
CALL MPI_FILE_WRITE(cfd_filehandle, data, npart_this_cycle, mpireal, & cfd_status, cfd_errcode) END DO DEALLOCATE(data) current_displacement = current_displacement + npart_global * num CALL MPI_ALLREDUCE(mn, mn_g, 1, mpireal, MPI_MIN, cfd_comm, cfd_errcode) CALL MPI_ALLREDUCE(mx, mx_g, 1, mpireal, MPI_MAX, cfd_comm, cfd_errcode) mn = mn_g mx = mx_g CALL MPI_FILE_SET_VIEW(cfd_filehandle, offset_for_min_max, mpireal, & mpireal, "native", MPI_INFO_NULL, cfd_errcode) IF (cfd_rank == default_rank) THEN CALL MPI_FILE_WRITE(cfd_filehandle, mn, 1, mpireal, cfd_status, & cfd_errcode) CALL MPI_FILE_WRITE(cfd_filehandle, mx, 1, mpireal, cfd_status, & cfd_errcode) END IF CALL MPI_BARRIER(comm, errcode) END SUBROUTINE cfd_write_nd_particle_variable_with_iterator_all END MODULE output_particle Lare3d/src/io/README000644 000765 000024 00000000305 11406627300 014337 0ustar00Tonystaff000000 000000 LARE2D io directory This directory contains the source for the custom self defining output file format used by LARE. 
It is STRONGLY recommended that users do not alter any files in this directory.Lare3d/src/core/conduct.f90000644 000765 000024 00000033161 11430520723 015763 0ustar00Tonystaff000000 000000 MODULE conduct USE shared_data USE boundary USE eos IMPLICIT NONE PRIVATE PUBLIC :: conduct_heat CONTAINS SUBROUTINE conduct_heat REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: uxkx, uxky, uxkz REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: uykx, uyky, uykz REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: uzkx, uzky, uzkz REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: energy0, limiter REAL(num) :: e2t, exb, eyb, ezb REAL(num) :: b, bxc, byc, bzc, bpx, bpy, bpz REAL(num) :: ux, uy, uz REAL(num) :: pow = 5.0_num / 2.0_num REAL(num) :: a1, a2, a3, error, errmax, errlast, abs_error REAL(num) :: w, residual, q_shx, q_shy, q_shz, q_sh, q_f, q_nl INTEGER :: loop, redblack, x1, y1, z1 LOGICAL :: converged REAL(num), PARAMETER :: fractional_error = 1.e-3_num REAL(num), PARAMETER :: b_min = 1.e-3_num ALLOCATE(uxkx(-1:nx+1,-1:ny+1,-1:nz+1), uxky(-1:nx+1,-1:ny+1,-1:nz+1)) ALLOCATE(uxkz(-1:nx+1,-1:ny+1,-1:nz+1)) ALLOCATE(uykx(-1:nx+1,-1:ny+1,-1:nz+1), uyky(-1:nx+1,-1:ny+1,-1:nz+1)) ALLOCATE(uykz(-1:nx+1,-1:ny+1,-1:nz+1)) ALLOCATE(uzkx(-1:nx+1,-1:ny+1,-1:nz+1), uzky(-1:nx+1,-1:ny+1,-1:nz+1)) ALLOCATE(uzkz(-1:nx+1,-1:ny+1,-1:nz+1)) ALLOCATE(energy0(-1:nx+2,-1:ny+2,-1:nz+2)) ALLOCATE(limiter(-1:nx+2,-1:ny+2,-1:nz+2)) ! find factor required to convert between energy and temperature ! N.B. only works for simple EOS e2t = (gamma - 1.0_num) / 2.0_num a1 = fractional_error * MAXVAL(energy) CALL MPI_ALLREDUCE(a1, abs_error, 1, mpireal, MPI_MAX, comm, errcode) DO iz = -1, nz + 1 DO iy = -1, ny + 1 DO ix = -1, nx + 1 ! 
x face centred B field bxc = bx(ix,iy,iz) byc = (by(ix,iy,iz) + by(ix+1,iy,iz) + by(ix,iy-1,iz) + by(ix+1,iy-1,iz)) / 4.0_num bzc = (bz(ix,iy,iz) + bz(ix+1,iy,iz) + bz(ix,iy,iz-1) + bz(ix+1,iy,iz-1)) / 4.0_num bpx = SQRT(bxc**2 + byc**2 + bzc**2) bpx = MAX(bpx, none_zero) exb = (energy(ix,iy,iz) + energy(ix+1,iy,iz)) / 2.0_num ! Direction of magnetic field on x face ux = bxc / bpx uy = byc / bpx uz = bzc / bpx ! Kappa along magnetic field, now a vector uxkx(ix,iy,iz) = ux * ux * kappa_0 * (e2t * exb)**pow uxky(ix,iy,iz) = ux * uy * kappa_0 * (e2t * exb)**pow uxkz(ix,iy,iz) = ux * uz * kappa_0 * (e2t * exb)**pow ! add symmetic conduction near b=0 points uxkx(ix,iy,iz) = uxkx(ix,iy,iz) + b_min**2 * kappa_0 * (e2t * exb)**pow & / (bpx**2 + b_min**2) ! y face centred B field bxc = (bx(ix,iy,iz) + bx(ix,iy+1,iz) + bx(ix-1,iy,iz) + bx(ix-1,iy+1,iz)) / 4.0_num byc = by(ix,iy,iz) bzc = (bz(ix,iy,iz) + bz(ix,iy+1,iz) + bz(ix,iy,iz-1) + bz(ix,iy+1,iz-1)) / 4.0_num bpy = SQRT(bxc**2 + byc**2 + bzc**2) bpy = MAX(bpy, none_zero) eyb = (energy(ix,iy,iz) + energy(ix,iy+1,iz)) / 2.0_num ! Direction of magnetic field on y face ux = bxc / bpy uy = byc / bpy uz = bzc / bpy ! Kappa along magnetic field, now a vector uykx(ix,iy,iz) = uy * ux * kappa_0 * (e2t * eyb)**pow uyky(ix,iy,iz) = uy * uy * kappa_0 * (e2t * eyb)**pow uykz(ix,iy,iz) = uy * uz * kappa_0 * (e2t * eyb)**pow ! add symmetic conduction near b=0 points uyky(ix,iy,iz) = uyky(ix,iy,iz) + b_min**2 * kappa_0 * (e2t * eyb)**pow & / (bpy**2 + b_min**2) ! z face centred B field bxc = (bx(ix,iy,iz) + bx(ix,iy,iz+1) + bx(ix-1,iy,iz) + bx(ix-1,iy,iz+1)) / 4.0_num byc = (by(ix,iy,iz) + by(ix,iy,iz+1) + by(ix,iy-1,iz) + by(ix,iy-1,iz+1)) / 4.0_num bzc = bz(ix,iy,iz) bpz = SQRT(bxc**2 + byc**2 + bzc**2) bpz = MAX(bpz, none_zero) ezb = (energy(ix,iy,iz) + energy(ix,iy,iz+1)) / 2.0_num ! Direction of magnetic field on z face ux = bxc / bpz uy = byc / bpz uz = bzc / bpz ! 
Kappa along magnetic field, now a vector uzkx(ix,iy,iz) = uz * ux * kappa_0 * (e2t * ezb)**pow uzky(ix,iy,iz) = uz * uy * kappa_0 * (e2t * ezb)**pow uzkz(ix,iy,iz) = uz * uz * kappa_0 * (e2t * ezb)**pow ! add symmetic conduction near b=0 points uzkz(ix,iy,iz) = uzkz(ix,iy,iz) + b_min**2 * kappa_0 * (e2t * ezb)**pow & / (bpz**2 + b_min**2) END DO END DO END DO IF (heat_flux_limiter) THEN DO iz = 0, nz + 1 DO iy = 0, ny + 1 DO ix = 0, nx + 1 ! estimate the parallel heat flux and the centre of a cell q_shx = & (uxkx(ix,iy,iz) + uxkx(ix-1,iy,iz)) * e2t & * (energy(ix+1,iy,iz) - energy(ix-1,iy,iz)) / dxc(ix) & + (uxky(ix,iy,iz) + uxky(ix,iy-1,iz)) * e2t & * (energy(ix,iy+1,iz) - energy(ix,iy-1,iz)) / dyc(iy) & + (uxkz(ix,iy,iz) + uxkz(ix,iy,iz-1)) * e2t & * (energy(ix,iy,iz+1) - energy(ix,iy,iz-1)) / dzc(iz) q_shy = & (uykx(ix,iy,iz) + uykx(ix-1,iy,iz)) * e2t & * (energy(ix+1,iy,iz) - energy(ix-1,iy,iz)) / dxc(ix) & + (uyky(ix,iy,iz) + uyky(ix,iy-1,iz)) * e2t & * (energy(ix,iy+1,iz) - energy(ix,iy-1,iz)) / dyc(iy) & + (uykz(ix,iy,iz) + uykz(ix,iy,iz-1)) * e2t & * (energy(ix,iy,iz+1) - energy(ix,iy,iz-1)) / dzc(iz) q_shz = & (uzkx(ix,iy,iz) + uzkx(ix-1,iy,iz)) * e2t & * (energy(ix+1,iy,iz) - energy(ix-1,iy,iz)) / dxc(ix) & + (uzky(ix,iy,iz) + uzky(ix,iy-1,iz)) * e2t & * (energy(ix,iy+1,iz) - energy(ix,iy-1,iz)) / dyc(iy) & + (uzkz(ix,iy,iz) + uzkz(ix,iy,iz-1)) * e2t & * (energy(ix,iy,iz+1) - energy(ix,iy,iz-1)) / dzc(iz) q_sh = SQRT(q_shx**2 + q_shy**2 + q_shz**2) / 16.0_num ! estimate the free streaming limit ! 
42.85 = SRQT(m_p/m_e) q_f = 42.85_num * flux_limiter * rho(ix,iy,iz) & * MIN(e2t * energy(ix,iy,iz), temperature_100mk)**1.5_num q_nl = 1.0_num / (1.0_num / MAX(q_sh, none_zero) + 1.0_num / MAX(q_f, none_zero)) limiter(ix,iy,iz) = q_nl / MAX(q_sh, none_zero) / 2.0_num END DO END DO END DO DO iz = 0, nz+1 DO iy = 0, ny+1 DO ix = 0, nx+1 uxkx(ix,iy,iz) = uxkx(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix+1,iy,iz)) uxky(ix,iy,iz) = uxky(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix+1,iy,iz)) uxkz(ix,iy,iz) = uxkz(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix+1,iy,iz)) uykx(ix,iy,iz) = uykx(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix,iy+1,iz)) uyky(ix,iy,iz) = uyky(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix,iy+1,iz)) uykz(ix,iy,iz) = uykz(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix,iy+1,iz)) uzkx(ix,iy,iz) = uzkx(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix,iy,iz+1)) uzky(ix,iy,iz) = uzky(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix,iy,iz+1)) uzkz(ix,iy,iz) = uzkz(ix,iy,iz) * (limiter(ix,iy,iz) + limiter(ix,iy,iz+1)) END DO END DO END DO END IF converged = .FALSE. w = 1.6_num ! initial over-relaxation parameter ! store energy^{n} energy0 = energy ! interate to get energy^{n+1} by SOR Guass-Seidel iterate: DO loop = 1, 100 errmax = 0.0_num errlast = 0.0_num error = 0.0_num z1 = 1 DO redblack = 1, 2 y1 = z1 DO iz = 1, nz x1 = z1 DO iy = 1, ny DO ix = x1, nx, 2 ! terms containing energy(ix,iz) resulting from ! d^2/dx^2 and d^2/dy^2 derivatives a1 = uxkx(ix,iy,iz)/(dxc(ix)*dxb(ix)) + uxkx(ix-1,iy,iz)/(dxc(ix-1)*dxb(ix)) & + uyky(ix,iy,iz)/(dyc(iy)*dyb(iy)) + uyky(ix,iy-1,iz)/(dyc(iy-1)*dyb(iy)) & + uzkz(ix,iy,iz)/(dzc(iz)*dzb(iz)) + uzkz(ix,iy,iz-1)/(dzc(iz-1)*dzb(iz)) ! terms not containing energy(ix,iy,iz) resulting from ! 
d^2/dx^2, d^2/dy^2 and d^2/dz^2 derivatives a2 = uxkx(ix,iy,iz)*e2t*energy(ix+1,iy,iz)/(dxc(ix)*dxb(ix)) & + uxkx(ix-1,iy,iz)*e2t*energy(ix-1,iy,iz)/(dxc(ix-1)*dxb(ix)) a2 = a2 + uyky(ix,iy,iz)*e2t*energy(ix,iy+1,iz)/(dyc(iy)*dyb(iy)) & + uyky(ix,iy-1,iz)*e2t*energy(ix,iy-1,iz)/(dyc(iy-1)*dyb(iy)) a2 = a2 + uzkz(ix,iy,iz)*e2t*energy(ix,iy,iz+1)/(dzc(iz)*dzb(iz)) & + uzkz(ix,iy,iz-1)*e2t*energy(ix,iy,iz-1)/(dzc(iz-1)*dzb(iz)) ! terms not containing energy(ix,iy,iz) resulting from ! d^2/dxdy cross derivatives a2 = a2 + uxky(ix,iy,iz) * e2t & * (energy(ix+1,iy+1,iz) + energy(ix,iy+1,iz) - energy(ix+1,iy-1,iz) & - energy(ix,iy-1,iz)) & / (2.0_num * dxb(ix) * (dyc(iy) + dyc(iy-1))) a2 = a2 - uxky(ix-1,iy,iz) * e2t & * (energy(ix,iy+1,iz) + energy(ix-1,iy+1,iz) - energy(ix,iy-1,iz) & - energy(ix-1,iy-1,iz)) & / (2.0_num * dxb(ix) * (dyc(iy) + dyc(iy-1))) ! terms not containing energy(ix,iy,iz) resulting from ! d^2/dxdz cross derivatives a2 = a2 + uxkz(ix,iy,iz) * e2t & * (energy(ix+1,iy,iz+1) + energy(ix,iy,iz+1) - energy(ix+1,iy,iz-1) & - energy(ix,iy,iz-1)) & / (2.0_num * dxb(ix) * (dzc(iz) + dzc(iz-1))) a2 = a2 - uxkz(ix-1,iy,iz) * e2t & * (energy(ix,iy,iz+1) + energy(ix-1,iy,iz+1) - energy(ix,iy,iz-1) & - energy(ix-1,iy,iz-1)) & / (2.0_num * dxb(ix) * (dzc(iz) + dzc(iz-1))) ! terms not containing energy(ix,iy,iz) resulting from ! d^2/dydx cross derivatives a2 = a2 + uykx(ix,iy,iz) * e2t & * (energy(ix+1,iy+1,iz) + energy(ix+1,iy,iz) - energy(ix-1,iy+1,iz) & - energy(ix-1,iy,iz)) & / (2.0_num * dyb(iy) * (dxc(ix) + dxc(ix-1))) a2 = a2 - uykx(ix,iy-1,iz) * e2t & * (energy(ix+1,iy,iz) + energy(ix+1,iy-1,iz) - energy(ix-1,iy,iz) & - energy(ix-1,iy-1,iz)) & / (2.0_num * dyb(iy) * (dxc(ix) + dxc(ix-1))) ! terms not containing energy(ix,iy,iz) resulting from ! 
d^2/dydz cross derivatives a2 = a2 + uykz(ix,iy,iz) * e2t & * (energy(ix,iy+1,iz+1) + energy(ix,iy,iz+1) - energy(ix,iy+1,iz-1) & - energy(ix,iy,iz-1)) & / (2.0_num * dyb(iy) * (dzc(iz) + dzc(iz-1))) a2 = a2 - uykz(ix,iy-1,iz) * e2t & * (energy(ix,iy,iz+1) + energy(ix,iy-1,iz+1) - energy(ix,iy,iz-1) & - energy(ix,iy-1,iz-1)) & / (2.0_num * dyb(iy) * (dzc(iz) + dzc(iz-1))) ! terms not containing energy(ix,iy,iz) resulting from ! d^2/dzdx cross derivatives a2 = a2 + uzkx(ix,iy,iz) * e2t & * (energy(ix+1,iy,iz+1) + energy(ix+1,iy,iz) - energy(ix-1,iy,iz+1) & - energy(ix-1,iy,iz)) & / (2.0_num * dzb(iz) * (dxc(ix) + dxc(ix-1))) a2 = a2 - uzkx(ix,iy,iz-1) * e2t & * (energy(ix+1,iy,iz) + energy(ix+1,iy,iz-1) - energy(ix-1,iy,iz) & - energy(ix-1,iy,iz-1)) & / (2.0_num * dzb(iz) * (dxc(ix) + dxc(ix-1))) ! terms not containing energy(ix,iy,iz) resulting from ! d^2/dzdy cross derivatives a2 = a2 + uzky(ix,iy,iz) * e2t & * (energy(ix,iy+1,iz+1) + energy(ix,iy+1,iz) - energy(ix,iy-1,iz+1) & - energy(ix,iy-1,iz)) & / (2.0_num * dzb(iz) * (dyc(iy) + dyc(iy-1))) a2 = a2 - uzky(ix,iy,iz-1) * e2t & * (energy(ix,iy+1,iz) + energy(ix,iy+1,iz-1) - energy(ix,iy-1,iz) & - energy(ix,iy-1,iz-1)) & / (2.0_num * dzb(iz) * (dyc(iy) + dyc(iy-1))) a1 = a1 * dt * e2t / rho(ix,iy,iz) a2 = a2 * dt / rho(ix,iy,iz) residual = energy(ix,iy,iz) & - (energy0(ix,iy,iz) + a2) / (1.0_num + a1) energy(ix,iy,iz) = MAX(energy(ix,iy,iz) - w * residual, 0.0_num) error = ABS(residual) errmax = MAX(errmax, error) END DO x1 = 3 - x1 END DO y1 = 3 - y1 END DO z1 = 3 - z1 CALL energy_bcs END DO CALL MPI_ALLREDUCE(errmax, error, 1, mpireal, MPI_MAX, comm, errcode) errmax = error IF (errmax > errlast) w = (1.0_num + w) / 2.0_num errlast = errmax IF (errmax .LT. abs_error) THEN converged = .TRUE. EXIT iterate END IF END DO iterate IF (rank == 0 .AND. .NOT. 
converged) PRINT * , "Conduction failed at t = ", time DEALLOCATE(uxkx, uxky, uxkz) DEALLOCATE(uykx, uyky, uykz) DEALLOCATE(uzkx, uzky, uzkz) DEALLOCATE(energy0) DEALLOCATE(limiter) END SUBROUTINE conduct_heat END MODULE conduct Lare3d/src/core/eos.F90000644 000765 000024 00000003526 11430520723 015054 0ustar00Tonystaff000000 000000 MODULE EOS USE shared_data USE normalise IMPLICIT NONE CONTAINS ! This module contains all the information about the equations of state ! used by LARE. SUBROUTINE get_pressure(rho_in, en_in, m_in, ix, iy, iz, p) REAL(num), INTENT(IN) :: rho_in, en_in ! input energy & density INTEGER, INTENT(IN) :: m_in ! EOS number INTEGER, INTENT(IN) :: ix, iy, iz REAL(num), INTENT(OUT) :: p ! output pressure IF (m_in .EQ. EOS_IDEAL) THEN p = en_in * rho_in * (gamma - 1.0_num) RETURN END IF IF (m_in .EQ. EOS_PI) THEN p = en_in * rho_in * (gamma - 1.0_num) RETURN END IF IF (m_in .EQ. EOS_ION) THEN p = (en_in - (1.0_num - xi_n(ix, iy, iz)) * ionise_pot) & * (gamma - 1.0_num) * rho_in RETURN END IF END SUBROUTINE get_pressure SUBROUTINE get_temp(rho_in, energy_in, m_in, ix, iy, iz, temp_out) REAL(num), INTENT(IN) :: rho_in, energy_in INTEGER, INTENT(IN) :: m_in, ix, iy, iz REAL(num), INTENT(OUT) :: temp_out ! mbar and kb will be the correct form for the normalisation when ! the code is running, or when setting up initial conditions with ! SI_Code = F, mbar and kb are both 1.0, reducing to the normal case ! when setting up initial conditions with SI_Code = T, kb and mbar ! will have their normal SI values IF (m_in .EQ. EOS_IDEAL) THEN temp_out = energy_in * (gamma - 1.0_num) / 2.0_num RETURN END IF IF (m_in .EQ. EOS_PI) THEN temp_out = (gamma - 1.0_num) * energy_in / (2.0_num - xi_n(ix, iy, iz)) RETURN END IF IF (m_in .EQ. 
EOS_ION) THEN temp_out = (gamma - 1.0_num) & * (energy_in - (1.0_num - xi_n(ix, iy, iz)) * ionise_pot) & / ((2.0_num - xi_n(ix, iy, iz))) RETURN END IF END SUBROUTINE get_temp END MODULE EOS Lare3d/src/core/lagran.F90000644 000765 000024 00000130511 11430520723 015525 0ustar00Tonystaff000000 000000 MODULE lagran !----------------------------------------------------------------- ! This subroutine performs the Lagrangian step ! Notes: ! There are !#DEC$ directives in this routine ! These override compilers vector analysis tools !----------------------------------------------------------------- USE shared_data USE boundary USE neutral USE diagnostics USE eos USE conduct IMPLICIT NONE PRIVATE PUBLIC :: lagrangian_step, eta_calc ! only used inside lagran.f90 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: qxy, qxz, qyz, pressure REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: qxx, qyy, qzz, visc_heat REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: flux_x, flux_y, flux_z, curlb CONTAINS SUBROUTINE lagrangian_step INTEGER :: substeps, subcycle REAL(num) :: actual_dt, dt_sub ALLOCATE (bx1(0:nx+1, 0:ny+1, 0:nz+1), by1(0:nx+1, 0:ny+1, 0:nz+1), & bz1(0:nx+1, 0:ny+1, 0:nz+1), qxy(0:nx+1, 0:ny+1, 0:nz+1), & qxz(0:nx+1, 0:ny+1, 0:nz+1), qyz(0:nx+1, 0:ny+1, 0:nz+1), & visc_heat(0:nx+1, 0:ny+1, 0:nz+1), & pressure(-1:nx+2, -1:ny+2, -1:nz+2), qxx(0:nx+1, 0:ny+1, 0:nz+1), & qyy(0:nx+1, 0:ny+1, 0:nz+1), qzz(0:nx+1, 0:ny+1, 0:nz+1), & flux_x(0:nx, 0:ny, 0:nz), flux_y(0:nx, 0:ny, 0:nz), & flux_z(0:nx, 0:ny, 0:nz), curlb(0:nx, 0:ny, 0:nz)) IF (include_neutrals) CALL neutral_fraction(eos_number) IF (resistive_mhd .OR. hall_mhd) THEN ! if subcycling isn't wanted set dt = dtr in set_dt, don't just ! set substeps to 1. IF (resistive_mhd) THEN dt_sub = dtr ELSE dt_sub = dth END IF IF (resistive_mhd .AND. 
hall_mhd) dt_sub = MIN(dtr, dth) substeps = INT(dt / dt_sub) + 1 IF (substeps > peak_substeps) peak_substeps = substeps actual_dt = dt dt = dt / REAL(substeps, num) DO subcycle = 1, substeps CALL eta_calc IF (include_neutrals) CALL neutral_fraction(eos_number) IF (cowling_resistivity) CALL perpendicular_resistivity ! IF (hall_mhd) CALL hall_effects IF (resistive_mhd) CALL resistive_effects END DO dt = actual_dt END IF IF (conduction) CALL conduct_heat DO iz = 0, nz+1 izm = iz - 1 DO iy = 0, ny+1 iym = iy - 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx+1 ixm = ix - 1 bx1(ix, iy, iz) = (bx(ix, iy, iz) + bx(ixm, iy, iz)) / 2.0_num by1(ix, iy, iz) = (by(ix, iy, iz) + by(ix, iym, iz)) / 2.0_num bz1(ix, iy, iz) = (bz(ix, iy, iz) + bz(ix, iy, izm)) / 2.0_num END DO END DO END DO CALL predictor_corrector_step DEALLOCATE (bx1, by1, bz1, qxy, qxz, qyz, visc_heat, pressure, qxx, qyy, & qzz, flux_x, flux_y, flux_z, curlb) CALL energy_bcs CALL density_bcs CALL velocity_bcs END SUBROUTINE lagrangian_step SUBROUTINE predictor_corrector_step REAL(num) :: p, pxp, pyp, pxpyp REAL(num) :: pzp, pxpzp, pypzp, pxpypzp REAL(num) :: e1, rho_v REAL(num) :: fx, fy, fz REAL(num) :: vxb, vxbm, vyb, vybm, vzb, vzbm REAL(num) :: bxv, byv, bzv, jx, jy, jz REAL(num) :: cvx, cvxp, cvy, cvyp, cvz, cvzp REAL(num) :: dv dt2 = dt / 2.0_num CALL viscosity_and_b_update bx1 = bx1 * cv1(0:nx+1, 0:ny+1, 0:nz+1) by1 = by1 * cv1(0:nx+1, 0:ny+1, 0:nz+1) bz1 = bz1 * cv1(0:nx+1, 0:ny+1, 0:nz+1) DO iz = 0, nz+1 DO iy = 0, ny+1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx+1 dv = cv1(ix, iy, iz) / cv(ix, iy, iz) - 1.0_num ! predictor energy #ifdef Q_MONO ! add shock viscosity pressure(ix, iy, iz) = pressure(ix, iy, iz) + p_visc(ix, iy, iz) #endif e1 = energy(ix, iy, iz) - pressure(ix, iy, iz) * dv / rho(ix, iy, iz) e1 = e1 + visc_heat(ix, iy, iz) * dt2 / rho(ix, iy, iz) ! 
now define the predictor step pressures CALL get_pressure(rho(ix, iy, iz) * cv(ix, iy, iz) / cv1(ix, iy, iz),& e1, eos_number, ix, iy, iz, pressure(ix, iy, iz)) #ifdef Q_MONO ! add shock viscosity pressure(ix, iy, iz) = pressure(ix, iy, iz) + p_visc(ix, iy, iz) #endif END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 p = pressure(ix , iy , iz ) pxp = pressure(ixp, iy , iz ) pyp = pressure(ix , iyp, iz ) pxpyp = pressure(ixp, iyp, iz ) pzp = pressure(ix , iy , izp) pxpzp = pressure(ixp, iy , izp) pypzp = pressure(ix , iyp, izp) pxpypzp = pressure(ixp, iyp, izp) w1 = (p + pyp + pzp + pypzp) / 4.0_num w2 = (pxp + pxpyp + pxpzp + pxpypzp) / 4.0_num fx = -(w2 - w1) / dxc(ix) w1 = (p + pxp + pzp + pxpzp) / 4.0_num w2 = (pyp + pxpyp + pypzp + pxpypzp) / 4.0_num fy = -(w2 - w1) / dyc(iy) w1 = (p + pxp + pyp + pxpyp) / 4.0_num w2 = (pzp + pxpzp + pypzp + pxpypzp) / 4.0_num fz = -(w2 - w1) / dzc(iz) ! add diagonal components w1 = (qxx(ix, iy, iz) + qxx(ix, iyp, iz) & + qxx(ix, iy, izp) + qxx(ix, iyp, izp)) / 4.0_num w2 = (qxx(ixp, iy, iz) + qxx(ixp, iyp, iz) & + qxx(ixp, iy, izp) + qxx(ixp, iyp, izp)) / 4.0_num fx = fx + (w2 - w1) / dxc(ix) w1 = (qyy(ix, iy, iz) + qyy(ixp, iy, iz) & + qyy(ix, iy, izp) + qyy(ixp, iy, izp)) / 4.0_num w2 = (qyy(ix, iyp, iz) + qyy(ixp, iyp, iz) & + qyy(ix, iyp, izp) + qyy(ixp, iyp, izp)) / 4.0_num fy = fy + (w2 - w1) / dyc(iy) w1 = (qzz(ix, iy, iz) + qzz(ixp, iy, iz) & + qzz(ix, iyp, iz) + qzz(ixp, iyp, iz)) / 4.0_num w2 = (qzz(ix, iy, izp) + qzz(ixp, iy, izp) & + qzz(ix, iyp, izp) + qzz(ixp, iyp, izp)) / 4.0_num fz = fz + (w2 - w1) / dzc(iz) ! add shear viscosity forces ! 
fx w1 = (qxy(ix, iy, iz) + qxy(ixp, iy, iz) & + qxy(ix, iy, izp) + qxy(ixp, iy, izp)) / 4.0_num w2 = (qxy(ix, iyp, iz) + qxy(ixp, iyp, iz) & + qxy(ix, iyp, izp) + qxy(ixp, iyp, izp)) / 4.0_num fx = fx + (w2 - w1) / dyc(iy) w1 = (qxz(ix, iy, iz) + qxz(ixp, iy, iz) & + qxz(ix, iyp, iz) + qxz(ixp, iyp, iz)) / 4.0_num w2 = (qxz(ix, iy, izp) + qxz(ixp, iy, izp) & + qxz(ix, iyp, izp) + qxz(ixp, iyp, izp)) / 4.0_num fx = fx + (w2 - w1) / dzc(iz) ! fy w1 = (qxy(ix, iy, iz) + qxy(ix, iyp, iz) & + qxy(ix, iy, izp) + qxy(ix, iyp, izp)) / 4.0_num w2 = (qxy(ixp, iy, iz) + qxy(ixp, iyp, iz) & + qxy(ixp, iy, izp) + qxy(ixp, iyp, izp)) / 4.0_num fy = fy + (w2 - w1) / dxc(ix) w1 = (qyz(ix, iy, iz) + qyz(ixp, iy, iz) & + qyz(ix, iyp, iz) + qyz(ixp, iyp, iz)) / 4.0_num w2 = (qyz(ix, iy, izp) + qyz(ixp, iy, izp) & + qyz(ix, iyp, izp) + qyz(ixp, iyp, izp)) / 4.0_num fy = fy + (w2 - w1) / dzc(iz) ! fz w1 = (qxz(ix, iy, iz) + qxz(ix, iyp, iz) & + qxz(ix, iy, izp) + qxz(ix, iyp, izp)) / 4.0_num w2 = (qxz(ixp, iy, iz) + qxz(ixp, iyp, iz) & + qxz(ixp, iy, izp) + qxz(ixp, iyp, izp)) / 4.0_num fz = fz + (w2 - w1) / dxc(ix) w1 = (qyz(ix, iy, iz) + qyz(ixp, iy, iz) & + qyz(ix, iy, izp) + qyz(ixp, iy, izp)) / 4.0_num w2 = (qyz(ix, iyp, iz) + qyz(ixp, iyp, iz) & + qyz(ix, iyp, izp) + qyz(ixp, iyp, izp)) / 4.0_num fz = fz + (w2 - w1) / dyc(iy) cvx = cv1(ix, iy, iz) + cv1(ix, iyp, iz) & + cv1(ix, iy, izp) + cv1(ix, iyp, izp) cvxp = cv1(ixp, iy, iz) + cv1(ixp, iyp, iz) & + cv1(ixp, iy, izp) + cv1(ixp, iyp, izp) cvy = cv1(ix, iy, iz) + cv1(ixp, iy, iz) & + cv1(ix, iy, izp) + cv1(ixp, iy, izp) cvyp = cv1(ix, iyp, iz) + cv1(ixp, iyp, iz) & + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp) cvz = cv1(ix, iy, iz) + cv1(ixp, iy, iz) & + cv1(ix, iyp, iz) + cv1(ixp, iyp, iz) cvzp = cv1(ix, iy, izp) + cv1(ixp, iy, izp) & + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp) w1 = (bz1(ix, iy, iz) + bz1(ixp, iy, iz) & + bz1(ix, iy, izp) + bz1(ixp, iy, izp)) / cvy w2 = (bz1(ix, iyp, iz) + bz1(ixp, iyp, iz) & + bz1(ix, iyp, izp) + 
bz1(ixp, iyp, izp)) / cvyp jx = (w2 - w1) / dyc(iy) w1 = (by1(ix, iy, iz) + by1(ixp, iy, iz) & + by1(ix, iyp, iz) + by1(ixp, iyp, iz)) / cvz w2 = (by1(ix, iy, izp) + by1(ixp, iy, izp) & + by1(ix, iyp, izp) + by1(ixp, iyp, izp)) / cvzp jx = jx - (w2 - w1) / dzc(iz) w1 = (bz1(ix, iy, iz) + bz1(ix, iyp, iz) & + bz1(ix, iy, izp) + bz1(ix, iyp, izp)) / cvx w2 = (bz1(ixp, iy, iz) + bz1(ixp, iyp, iz) & + bz1(ixp, iy, izp) + bz1(ixp, iyp, izp)) / cvxp jy = -(w2 - w1) / dxc(ix) w1 = (bx1(ix, iy, iz) + bx1(ixp, iy, iz) & + bx1(ix, iyp, iz) + bx1(ixp, iyp, iz)) / cvz w2 = (bx1(ix, iy, izp) + bx1(ixp, iy, izp) & + bx1(ix, iyp, izp) + bx1(ixp, iyp, izp)) / cvzp jy = jy + (w2 - w1) / dzc(iz) w1 = (by1(ix, iy, iz) + by1(ix, iyp, iz) & + by1(ix, iy, izp) + by1(ix, iyp, izp)) / cvx w2 = (by1(ixp, iy, iz) + by1(ixp, iyp, iz) & + by1(ixp, iy, izp) + by1(ixp, iyp, izp)) / cvxp jz = (w2 - w1) / dxc(ix) w1 = (bx1(ix, iy, iz) + bx1(ixp, iy, iz) & + bx1(ix, iy, izp) + bx1(ixp, iy, izp)) / cvy w2 = (bx1(ix, iyp, iz) + bx1(ixp, iyp, iz) & + bx1(ix, iyp, izp) + bx1(ixp, iyp, izp)) / cvyp jz = jz - (w2 - w1) / dyc(iy) bxv = (bx1(ix , iy , iz ) + bx1(ixp, iy , iz ) & + bx1(ix , iy , izp) + bx1(ixp, iy , izp) & + bx1(ix , iyp, iz ) + bx1(ixp, iyp, iz ) & + bx1(ix , iyp, izp) + bx1(ixp, iyp, izp)) & / (cvx + cvxp) byv = (by1(ix , iy , iz ) + by1(ixp, iy , iz ) & + by1(ix , iy , izp) + by1(ixp, iy , izp) & + by1(ix , iyp, iz ) + by1(ixp, iyp, iz ) & + by1(ix , iyp, izp) + by1(ixp, iyp, izp)) & / (cvx + cvxp) bzv = (bz1(ix , iy , iz ) + bz1(ixp, iy , iz ) & + bz1(ix , iy , izp) + bz1(ixp, iy , izp) & + bz1(ix , iyp, iz ) + bz1(ixp, iyp, iz ) & + bz1(ix , iyp, izp) + bz1(ixp, iyp, izp)) & / (cvx + cvxp) fx = fx + (jy * bzv - jz * byv) fy = fy - (jx * bzv - jz * bxv) fz = fz + (jx * byv - jy * bxv) rho_v = rho(ix , iy , iz ) * cv(ix , iy , iz ) & + rho(ixp, iy , iz ) * cv(ixp, iy , iz ) & + rho(ix , iyp, iz ) * cv(ix , iyp, iz ) & + rho(ixp, iyp, iz ) * cv(ixp, iyp, iz ) & + rho(ix , iy , izp) * 
cv(ix , iy , izp) & + rho(ixp, iy , izp) * cv(ixp, iy , izp) & + rho(ix , iyp, izp) * cv(ix , iyp, izp) & + rho(ixp, iyp, izp) * cv(ixp, iyp, izp) rho_v = rho_v / (cv(ix, iy , iz ) + cv(ixp, iy , iz ) & + cv(ix, iyp, iz ) + cv(ixp, iyp, iz ) & + cv(ix, iy , izp) + cv(ixp, iy , izp) & + cv(ix, iyp, izp) + cv(ixp, iyp, izp)) fz = fz - (rho_v * grav(iz)) ! find half step velocity needed for remap vx1(ix, iy, iz) = vx(ix, iy, iz) + dt2 * fx / rho_v vy1(ix, iy, iz) = vy(ix, iy, iz) + dt2 * fy / rho_v vz1(ix, iy, iz) = vz(ix, iy, iz) + dt2 * fz / rho_v ! velocity at the end of the Lagrangian step vx(ix, iy, iz) = vx(ix, iy, iz) + dt * fx / rho_v vy(ix, iy, iz) = vy(ix, iy, iz) + dt * fy / rho_v vz(ix, iy, iz) = vz(ix, iy, iz) + dt * fz / rho_v END DO END DO END DO IF (any_open) CALL store_boundary_dv CALL remap_v_bcs CALL visc_heating ! finally correct density and energy to final values DO iz = 1, nz izm = iz - 1 DO iy = 1, ny iym = iy - 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx ixm = ix - 1 vxb = (vx1(ix, iy, iz) + vx1(ix, iym, iz) & + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num vxbm = (vx1(ixm, iy, iz) + vx1(ixm, iym, iz) & + vx1(ixm, iy, izm) + vx1(ixm, iym, izm)) / 4.0_num vyb = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num vybm = (vy1(ix, iym, iz) + vy1(ixm, iym, iz) & + vy1(ix, iym, izm) + vy1(ixm, iym, izm)) / 4.0_num vzb = (vz1(ix, iy, iz) + vz1(ixm, iy, iz) & + vz1(ix, iym, iz) + vz1(ixm, iym, iz)) / 4.0_num vzbm = (vz1(ix, iy, izm) + vz1(ixm, iy, izm) & + vz1(ix, iym, izm) + vz1(ixm, iym, izm)) / 4.0_num dv = ((vxb - vxbm) / dxb(ix) + (vyb - vybm) / dyb(iy) & + (vzb - vzbm) / dzb(iz)) * dt ! it is possible that dv has changed sign since the predictor step ! in this case p_visc * dv ought to be removed from the heating ! if QMONO is set - not done for simplicity since this is a rare ! combination cv1(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv) ! 
energy at end of Lagrangian step energy(ix, iy, iz) = energy(ix, iy, iz) & - pressure(ix, iy, iz) * dv / rho(ix, iy, iz) energy(ix, iy, iz) = energy(ix, iy, iz) & + dt * visc_heat(ix, iy, iz) / rho(ix, iy, iz) ! density at end of Lagrangian step rho(ix, iy, iz) = rho(ix, iy, iz) / (1.0_num + dv) total_visc_heating = total_visc_heating & + dt * visc_heat(ix, iy, iz) * cv(ix, iy, iz) #ifdef Q_MONO total_visc_heating = total_visc_heating & - p_visc(ix, iy, iz) * dv * cv(ix, iy, iz) #endif END DO END DO END DO END SUBROUTINE predictor_corrector_step SUBROUTINE viscosity_and_b_update ! wilkins viscosity and B field update REAL(num) :: vxb, vxbm, vyb, vybm, vzb, vzbm REAL(num) :: p, pxp, pxm, pyp, pym, pzp, pzm, fx, fy, fz, dv REAL(num) :: dvxdx, dvydy, dvzdz, dvxy, dvxz, dvyz, s, L, L2, cf REAL(num) :: sxx, syy, szz, sxy, sxz, syz REAL(num) :: dvxdy, dvxdz, dvydx, dvydz, dvzdx, dvzdy REAL(num) :: cs REAL(num) :: w2_1,w2_2,w2_3,w2_4 REAL(num) :: flag1,flag2,flag3,flag4,sg0,dvg0 DO iz = -1, nz+2 DO iy = -1, ny+2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = -1, nx+2 CALL get_pressure(rho(ix, iy, iz), energy(ix, iy, iz), eos_number, & ix, iy, iz, pressure(ix, iy, iz)) END DO END DO END DO DO iz = 0, nz+1 izm = iz - 1 izp = iz + 1 DO iy = 0, ny+1 iym = iy - 1 iyp = iy + 1 DO ix = 0, nx+1 ixm = ix - 1 ixp = ix + 1 ! vx at Sx(i, j, k) vxb = (vx(ix, iy, iz) + vx(ix, iym, iz) & + vx(ix, iy, izm) + vx(ix, iym, izm)) / 4.0_num ! vx at Sx(i-1, j, k) vxbm = (vx(ixm, iy, iz) + vx(ixm, iym, iz) & + vx(ixm, iy, izm) + vx(ixm, iym, izm)) / 4.0_num ! vy at Sy(i, j, k) vyb = (vy(ix, iy, iz) + vy(ixm, iy, iz) & + vy(ix, iy, izm) + vy(ixm, iy, izm)) / 4.0_num ! vy at Sy(i, j-1, k) vybm = (vy(ix, iym, iz) + vy(ixm, iym, iz) & + vy(ix, iym, izm) + vy(ixm, iym, izm)) / 4.0_num ! vz at Sz(i, j, k) vzb = (vz(ix, iy, iz) + vz(ixm, iy, iz) & + vz(ix, iym, iz) + vz(ixm, iym, iz)) / 4.0_num ! 
vz at Sz(i, j, k-1) vzbm = (vz(ix, iy, izm) + vz(ixm, iy, izm) & + vz(ix, iym, izm) + vz(ixm, iym, izm)) / 4.0_num dvxdx = (vxb - vxbm) / dxb(ix) dvydy = (vyb - vybm) / dyb(iy) dvzdz = (vzb - vzbm) / dzb(iz) dv = (dvxdx + dvydy + dvzdz) * dt2 cv1(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv) ! vx at Sy(i, j, k) vxb = (vx(ix, iy, iz) + vx(ixm, iy, iz) & + vx(ix, iy, izm) + vx(ixm, iy, izm)) / 4.0_num ! vx at Sy(i, j-1, k) vxbm = (vx(ix, iym, iz) + vx(ixm, iym, iz) & + vx(ix, iym, izm) + vx(ixm, iym, izm)) / 4.0_num ! vy at Sx(i, j, k) vyb = (vy(ix, iy, iz) + vy(ix, iym, iz) & + vy(ix, iy, izm) + vy(ix, iym, izm)) / 4.0_num ! vy at Sx(i-1, j, k) vybm = (vy(ixm, iy, iz) + vy(ixm, iym, iz) & + vy(ixm, iy, izm) + vy(ixm, iym, izm)) / 4.0_num dvxdy = (vxb - vxbm) / dyb(iy) dvydx = (vyb - vybm) / dxb(ix) dvxy = dvxdy + dvydx sxy = dvxy / 2.0_num sxx = 2.0_num * dvxdx / 3.0_num - (dvydy + dvzdz) / 3.0_num syy = 2.0_num * dvydy / 3.0_num - (dvxdx + dvzdz) / 3.0_num szz = 2.0_num * dvzdz / 3.0_num - (dvxdx + dvydy) / 3.0_num ! vx at Sz(i, j, k) vxb = (vx(ix, iy, iz) + vx(ixm, iy, iz) & + vx(ix, iym, iz) + vx(ixm, iym, iz)) / 4.0_num ! vx at Sz(i, j, k-1) vxbm = (vx(ix, iy, izm) + vx(ixm, iy, izm) & + vx(ix, iym, izm) + vx(ixm, iym, izm)) / 4.0_num ! vz at Sx(i, j, k) vzb = (vz(ix, iy, iz) + vz(ix, iym, iz) & + vz(ix, iy, izm) + vz(ix, iym, izm)) / 4.0_num ! vz at Sx(i-1, j, k) vzbm = (vz(ixm, iy, iz) + vz(ixm, iym, iz) & + vz(ixm, iy, izm) + vz(ixm, iym, izm)) / 4.0_num dvxdz = (vxb - vxbm) / dzb(iz) dvzdx = (vzb - vzbm) / dxb(ix) dvxz = dvxdz + dvzdx sxz = dvxz / 2.0_num ! vy at Sz(i, j, k) vyb = (vy(ix, iy, iz) + vy(ixm, iy, iz) & + vy(ix, iym, iz) + vy(ixm, iym, iz)) / 4.0_num ! vy at Sz(i, j, k-1) vybm = (vy(ix, iy, izm) + vy(ixm, iy, izm) & + vy(ix, iym, izm) + vy(ixm, iym, izm)) / 4.0_num ! vz at Sy(i, j, k) vzb = (vz(ix, iy, iz) + vz(ixm, iy, iz) & + vz(ix, iy, izm) + vz(ixm, iy, izm)) / 4.0_num ! 
vz at Sy(i, j-1, k) vzbm = (vz(ix, iym, iz) + vz(ixm, iym, iz) & + vz(ix, iym, izm) + vz(ixm, iym, izm)) / 4.0_num dvydz = (vyb - vybm) / dzb(iz) dvzdy = (vzb - vzbm) / dyb(iy) dvyz = dvydz + dvzdy syz = dvyz / 2.0_num p = energy(ix, iy, iz) * (gamma - 1.0_num) * rho(ix, iy, iz) pxp = energy(ixp, iy, iz) * (gamma - 1.0_num) * rho(ixp, iy, iz) pxm = energy(ixm, iy, iz) * (gamma - 1.0_num) * rho(ixm, iy, iz) pyp = energy(ix, iyp, iz) * (gamma - 1.0_num) * rho(ix, iyp, iz) pym = energy(ix, iym, iz) * (gamma - 1.0_num) * rho(ix, iym, iz) pzp = energy(ix, iy, izp) * (gamma - 1.0_num) * rho(ix, iy, izp) pzm = energy(ix, iy, izm) * (gamma - 1.0_num) * rho(ix, iy, izm) ! should be half this but this cancels later fx = -(pxp - pxm) / dxb(ix) fy = -(pyp - pym) / dyb(iy) fz = -(pzp - pzm) / dzb(iz) w1 = fx**2 + fy**2 + fz**2 s = (dvxdx * fx**2 + dvydy * fy**2 + dvzdz * fz**2 + dvxy * fx * fy & + dvxz * fx * fz + dvyz * fy * fz) / MAX(w1, none_zero) ! These flags are used to replace the rather clearer code in ! **SECTION 1**. They are used instead to facilitate vector ! optimization flag1=MAX(SIGN(1.0_num,dyb(iy)*ABS(fx)-dxb(ix)*ABS(fy)),0.0_num) flag2=MAX(SIGN(1.0_num,dzb(iz)*ABS(fx)-dxb(ix)*ABS(fz)),0.0_num) flag3=MAX(SIGN(1.0_num,dzb(iz)*ABS(fy)-dyb(iy)*ABS(fz)),0.0_num) flag4=MAX(SIGN(1.0_num,w1-1.e-6_num),0.0_num) w2_1=dxb(ix)**2*w1 / MAX(fx**2, 1.e-20_num) w2_2=dzb(iz)**2*w1 / MAX(fz**2, 1.e-20_num) w2_3=dyb(iy)**2*w1 / MAX(fy**2, 1.e-20_num) w2_4=dzb(iz)**2*w1 / MAX(fz**2, 1.e-20_num) w2=w2_1*flag1*flag2 + w2_2*flag1*(1.0_num-flag2)& +w2_3*(1.0_num-flag1)*flag3 + w2_4*(1.0_num-flag1)*(1.0_num-flag3) w2=w2*flag4 + MIN(dxb(ix), dyb(iy), dzb(iz))**2 * (1.0_num-flag4) !!$ !BEGIN **SECTION 1** !!$ !**** ! IF (dxb(ix) * ABS(fy) < dyb(iy) * ABS(fx)) THEN ! IF (dxb(ix) * ABS(fz) < dzb(iz) * ABS(fx)) THEN ! w2 = dxb(ix)**2 * w1 / (fx**2 + 1.e-20_num) ! ELSE ! w2 = dzb(iz)**2 * w1 / (fz**2 + 1.e-20_num) ! END IF ! ELSE ! IF (dyb(iy) * ABS(fz) < dzb(iz) * ABS(fy)) THEN ! 
w2 = dyb(iy)**2 * w1 / (fy**2 + 1.e-20_num) ! ELSE ! w2 = dzb(iz)**2 * w1 / (fz**2 + 1.e-20_num) ! END IF ! END IF ! IF (w1 < 1.e-6_num) w2 = MIN(dxb(ix), dyb(iy), dzb(iz))**2 !$ !** L = SQRT(w2) L2 = L !This code is equivalent to IF (s > 0 .OR. dv > 0) L=0.0 sg0 = MAX(SIGN(1.0_num,s),0.0_num) dvg0 = MAX(SIGN(1.0_num,dv),0.0_num) L = L * sg0 * (1.0_num - dvg0) + L * (1.0_num -sg0) * dvg0 +& L * sg0 * dvg0 w1 = (bx1(ix, iy, iz)**2 + by1(ix, iy, iz)**2 + bz1(ix, iy, iz)**2) & / rho(ix, iy, iz) cs = SQRT(gamma*(gamma - 1.0_num) * energy(ix,iy,iz)) cf = SQRT(cs**2 + w1) p_visc(ix, iy, iz) = visc1 * ABS(s) * L*cf * rho(ix, iy, iz) & + visc2 * (s * L)**2 * rho(ix, iy, iz) qxx(ix, iy, iz) = 0.0_num qxy(ix, iy, iz) = 0.0_num qxz(ix, iy, iz) = 0.0_num qyy(ix, iy, iz) = 0.0_num qyz(ix, iy, iz) = 0.0_num qzz(ix, iy, iz) = 0.0_num #ifndef Q_MONO qxy(ix,iy,iz) = sxy * (L2 * rho(ix,iy,iz) & * (visc1 * cf + L2 * visc2 * ABS(s))) qxz(ix,iy,iz) = sxz * (L2 * rho(ix,iy,iz) & * (visc1 * cf + L2 * visc2 * ABS(s))) qyz(ix,iy,iz) = syz * (L2 * rho(ix,iy,iz) & * (visc1 * cf + L2 * visc2 * ABS(s))) qxx(ix,iy,iz) = sxx * (L2 * rho(ix,iy,iz) & * (visc1 * cf + L2 * visc2 * ABS(s))) qyy(ix,iy,iz) = syy * (L2 * rho(ix,iy,iz) & * (visc1 * cf + L2 * visc2 * ABS(s))) qzz(ix,iy,iz) = szz * (L2 * rho(ix,iy,iz) & * (visc1 * cf + L2 * visc2 * ABS(s))) #endif qxy(ix,iy,iz) = qxy(ix,iy,iz) + 2.0_num * sxy * rho(ix,iy,iz) * visc3 qxz(ix,iy,iz) = qxz(ix,iy,iz) + 2.0_num * sxz * rho(ix,iy,iz) * visc3 qyz(ix,iy,iz) = qyz(ix,iy,iz) + 2.0_num * syz * rho(ix,iy,iz) * visc3 qxx(ix,iy,iz) = qxx(ix,iy,iz) + 2.0_num * sxx * rho(ix,iy,iz) * visc3 qyy(ix,iy,iz) = qyy(ix,iy,iz) + 2.0_num * syy * rho(ix,iy,iz) * visc3 qzz(ix,iy,iz) = qzz(ix,iy,iz) + 2.0_num * SZZ * rho(ix,iy,iz) * visc3 visc_heat(ix,iy,iz) = qxy(ix,iy,iz)*dvxy + qxz(ix,iy,iz)*dvxz & + qyz(ix,iy,iz)*dvyz + qxx(ix,iy,iz)*dvxdx & + qyy(ix,iy,iz)*dvydy + qzz(ix,iy,iz)*dvzdz w4 = bx1(ix, iy, iz) * dvxdx & + by1(ix, iy, iz) * dvxdy + bz1(ix, iy, iz) * 
dvxdz bx1(ix, iy, iz) = (bx1(ix, iy, iz) + w4 * dt2) / (1.0_num + dv) w4 = bx1(ix, iy, iz) * dvydx & + by1(ix, iy, iz) * dvydy + bz1(ix, iy, iz) * dvydz by1(ix, iy, iz) = (by1(ix, iy, iz) + w4 * dt2) / (1.0_num + dv) w4 = bx1(ix, iy, iz) * dvzdx & + by1(ix, iy, iz) * dvzdy + bz1(ix, iy, iz) * dvzdz bz1(ix, iy, iz) = (bz1(ix, iy, iz) + w4 * dt2) / (1.0_num + dv) END DO END DO END DO END SUBROUTINE viscosity_and_b_update SUBROUTINE visc_heating REAL(num) :: vxb, vxbm, vyb, vybm, vzb, vzbm REAL(num) :: dvxdx, dvydy, dvzdz, dvxy, dvxz, dvyz RETURN DO iz = 0, nz+1 izm = iz - 1 izp = iz + 1 DO iy = 0, ny+1 iym = iy - 1 iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx+1 ixm = ix - 1 ixp = ix + 1 ! vx at Sx(i, j, k) vxb = (vx1(ix, iy, iz) + vx1(ix, iym, iz) & + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num ! vx at Sx(i-1, j, k) vxbm = (vx1(ixm, iy, iz) + vx1(ixm, iym, iz) & + vx1(ixm, iy, izm) + vx1(ixm, iym, izm)) / 4.0_num ! vy at Sy(i, j, k) vyb = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num ! vy at Sy(i, j-1, k) vybm = (vy1(ix, iym, iz) + vy1(ixm, iym, iz) & + vy1(ix, iym, izm) + vy1(ixm, iym, izm)) / 4.0_num ! vz at Sz(i, j, k) vzb = (vz1(ix, iy, iz) + vz1(ixm, iy, iz) & + vz1(ix, iym, iz) + vz1(ixm, iym, iz)) / 4.0_num ! 
vz at Sz(i, j, k-1) vzbm = (vz1(ix, iy, izm) + vz1(ixm, iy, izm) & + vz1(ix, iym, izm) + vz1(ixm, iym, izm)) / 4.0_num dvxdx = (vxb - vxbm) / dxb(ix) dvydy = (vyb - vybm) / dyb(iy) dvzdz = (vzb - vzbm) / dzb(iz) vxb = (vx1(ix, iy, iz) + vx1(ixm, iy, iz) & + vx1(ix, iy, izm) + vx1(ixm, iy, izm)) / 4.0_num vxbm = (vx1(ix, iym, iz) + vx1(ixm, iym, iz) & + vx1(ix, iym, izm) + vx1(ixm, iym, izm)) / 4.0_num vyb = (vy1(ix, iy, iz) + vy1(ix, iym, iz) & + vy1(ix, iy, izm) + vy1(ix, iym, izm)) / 4.0_num vybm = (vy1(ixm, iy, iz) + vy1(ixm, iym, iz) & + vy1(ixm, iy, izm) + vy1(ixm, iym, izm)) / 4.0_num dvxy = (vxb - vxbm) / dyb(iy) + (vyb - vybm) / dxb(ix) vxb = (vx1(ix, iy, iz) + vx1(ixm, iy, iz) & + vx1(ix, iym, iz) + vx1(ixm, iym, iz)) / 4.0_num vxbm = (vx1(ix, iy, izm) + vx1(ixm, iy, izm) & + vx1(ix, iym, izm) + vx1(ixm, iym, izm)) / 4.0_num vzb = (vz1(ix, iy, iz) + vz1(ix, iym, iz) & + vz1(ix, iy, izm) + vz1(ix, iym, izm)) / 4.0_num vzbm = (vz1(ixm, iy, iz) + vz1(ixm, iym, iz) & + vz1(ixm, iy, izm) + vz1(ixm, iym, izm)) / 4.0_num dvxz = (vxb - vxbm) / dzb(iz) + (vzb - vzbm) / dxb(ix) vyb = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iym, iz) + vy1(ixm, iym, iz)) / 4.0_num vybm = (vy1(ix, iy, izm) + vy1(ixm, iy, izm) & + vy1(ix, iym, izm) + vy1(ixm, iym, izm)) / 4.0_num vzb = (vz1(ix, iy, iz) + vz1(ixm, iy, iz) & + vz1(ix, iy, izm) + vz1(ixm, iy, izm)) / 4.0_num vzbm = (vz1(ix, iym, iz) + vz1(ixm, iym, iz) & + vz1(ix, iym, izm) + vz1(ixm, iym, izm)) / 4.0_num dvyz = (vyb - vybm) / dzb(iz) + (vzb - vzbm) / dyb(iy) visc_heat(ix, iy, iz) = & qxy(ix, iy, iz) * dvxy + qxz(ix, iy, iz) * dvxz & + qyz(ix, iy, iz) * dvyz + qxx(ix, iy, iz) * dvxdx & + qyy(ix, iy, iz) * dvydy + qzz(ix, iy, iz) * dvzdz END DO END DO END DO END SUBROUTINE visc_heating SUBROUTINE eta_calc REAL(num) :: jx, jy, jz, jxxp, jyyp, jzzp REAL(num) :: modj eta = 0.0_num DO iz = -1, nz+1 izp = iz + 1 DO iy = -1, ny+1 iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = -1, nx+1 ixp = ix + 1 ! 
jx at E3(i, j) jx = (bz(ix, iyp, iz) - bz(ix, iy, iz)) / dyc(iy) & - (by(ix, iy, izp) - by(ix, iy, iz)) / dzc(iz) ! jx at E3(i+1, j) jxxp = (bz(ixp, iyp, iz) - bz(ixp, iy, iz)) / dyc(iy) & - (by(ixp, iy, izp) - by(ixp, iy, iz)) / dzc(iz) ! jy at E2(i, j) jy = (bx(ix, iy, izp) - bx(ix, iy, iz)) / dzc(iz) & - (bz(ixp, iy, iz) - bz(ix, iy, iz)) / dxc(ix) ! jy at E2(i, j+1) jyyp = (bx(ix, iyp, izp) - bx(ix, iyp, iz)) / dzc(iz) & - (bz(ixp, iyp, iz) - bz(ix, iyp, iz)) / dxc(ix) ! jz at E1(i, j) jz = (by(ixp, iy, iz) - by(ix, iy, iz)) / dxc(ix) & - (bx(ix, iyp, iz) - bx(ix, iy, iz)) / dyc(iy) ! jz at E1(i, j) jzzp = (by(ixp, iy, izp) - by(ix, iy, izp)) / dxc(ix) & - (bx(ix, iyp, izp) - bx(ix, iy, izp)) / dyc(iy) ! current at V w4 = (jx + jxxp) / 2.0_num w5 = (jy + jyyp) / 2.0_num w6 = (jz + jzzp) / 2.0_num modj = SQRT(w4**2 + w5**2 + w6**2) IF (modj >= j_max) THEN eta(ix, iy, iz) = eta_background + eta0 ELSE eta(ix, iy, iz) = eta_background END IF END DO END DO END DO IF (.NOT. resistive_mhd) eta = 0.0_num END SUBROUTINE eta_calc ! Calculate the effect of resistivity on the magnetic field and Ohmic heating ! 
Use the subroutine rkstep SUBROUTINE resistive_effects REAL(num) :: dt6 REAL(num) :: jx1, jx2, jy1, jy2, jz1, jz2 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: k1x, k2x, k3x, k4x REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: k1y, k2y, k3y, k4y REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: k1z, k2z, k3z, k4z REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: c1, c2, c3, c4 ALLOCATE(k1x(0:nx, 0:ny, 0:nz), k2x(0:nx, 0:ny, 0:nz)) ALLOCATE(k3x(0:nx, 0:ny, 0:nz), k4x(0:nx, 0:ny, 0:nz)) ALLOCATE(k1y(0:nx, 0:ny, 0:nz), k2y(0:nx, 0:ny, 0:nz)) ALLOCATE(k3y(0:nx, 0:ny, 0:nz), k4y(0:nx, 0:ny, 0:nz)) ALLOCATE(k1z(0:nx, 0:ny, 0:nz), k2z(0:nx, 0:ny, 0:nz)) ALLOCATE(k3z(0:nx, 0:ny, 0:nz), k4z(0:nx, 0:ny, 0:nz)) ALLOCATE(c1(0:nx, 0:ny, 0:nz), c2(0:nx, 0:ny, 0:nz)) ALLOCATE(c3(0:nx, 0:ny, 0:nz), c4(0:nx, 0:ny, 0:nz)) dt = dt / 2.0_num bx1 = bx(0:nx+1, 0:ny+1, 0:nz+1) by1 = by(0:nx+1, 0:ny+1, 0:nz+1) bz1 = bz(0:nx+1, 0:ny+1, 0:nz+1) ! step 1 CALL rkstep k1x = flux_x k1y = flux_y k1z = flux_z c1 = curlb #ifdef Q_FIRSTORDER dt6 = dt k3x = k1x k3y = k1y k3z = k1z #else ! step 2 DO iz = 1, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx bx(ix, iy, iz) = bx1(ix, iy, iz) & + (k1z(ix, iy, iz) - k1z(ix, iy-1, iz)) * dt / dyb(iy) & - (k1y(ix, iy, iz) - k1y(ix, iy, iz-1)) * dt / dzb(iz) END DO END DO END DO DO iz = 1, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx by(ix, iy, iz) = by1(ix, iy, iz) & + (-k1z(ix, iy, iz) + k1z(ix-1, iy, iz)) * dt / dxb(ix) & + (k1x(ix, iy, iz) - k1x(ix, iy, iz-1)) * dt / dzb(iz) END DO END DO END DO DO iz = 0, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx bz(ix, iy, iz) = bz1(ix, iy, iz) & + (k1y(ix, iy, iz) - k1z(ix-1, iy, iz)) * dt / dxb(ix) & - (k1x(ix, iy, iz) - k1x(ix, iy-1, iz)) * dt / dyb(iy) END DO END DO END DO CALL bfield_bcs CALL rkstep k2x = flux_x k2y = flux_y k2z = flux_z c2 = curlb ! 
step 3 DO iz = 1, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx bx(ix, iy, iz) = bx1(ix, iy, iz) & + (k2z(ix, iy, iz) - k2z(ix, iy-1, iz)) * dt / dyb(iy) & - (k2y(ix, iy, iz) - k2y(ix, iy, iz-1)) * dt / dzb(iz) END DO END DO END DO DO iz = 1, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx by(ix, iy, iz) = by1(ix, iy, iz) & + (-k2z(ix, iy, iz) + k2z(ix-1, iy, iz)) * dt / dxb(ix) & + (k2x(ix, iy, iz) - k2x(ix, iy, iz-1)) * dt / dzb(iz) END DO END DO END DO DO iz = 0, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx bz(ix, iy, iz) = bz1(ix, iy, iz) & + (k2y(ix, iy, iz) - k2z(ix-1, iy, iz)) * dt / dxb(ix) & - (k2x(ix, iy, iz) - k2x(ix, iy-1, iz)) * dt / dyb(iy) END DO END DO END DO CALL bfield_bcs CALL rkstep k3x = flux_x k3y = flux_y k3z = flux_z c3 = curlb dt = 2.0_num * dt ! step 4 DO iz = 1, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx bx(ix, iy, iz) = bx1(ix, iy, iz) & + (k3z(ix, iy, iz) - k3z(ix, iy-1, iz)) * dt / dyb(iy) & - (k3y(ix, iy, iz) - k3y(ix, iy, iz-1)) * dt / dzb(iz) END DO END DO END DO DO iz = 1, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx by(ix, iy, iz) = by1(ix, iy, iz) & + (-k3z(ix, iy, iz) + k3z(ix-1, iy, iz)) * dt / dxb(ix) & + (k3x(ix, iy, iz) - k3x(ix, iy, iz-1)) * dt / dzb(iz) END DO END DO END DO DO iz = 0, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx bz(ix, iy, iz) = bz1(ix, iy, iz) & + (k3y(ix, iy, iz) - k3z(ix-1, iy, iz)) * dt / dxb(ix) & - (k3x(ix, iy, iz) - k3x(ix, iy-1, iz)) * dt / dyb(iy) END DO END DO END DO CALL bfield_bcs CALL rkstep k4x = flux_x k4y = flux_y k4z = flux_z c4 = curlb ! 
full update dt6 = dt / 6.0_num k3x = k1x + 2.0_num * k2x + 2.0_num * k3x + k4x k3y = k1y + 2.0_num * k2y + 2.0_num * k3y + k4y k3z = k1z + 2.0_num * k2z + 2.0_num * k3z + k4z c1 = c1 + 2.0_num * c2 + 2.0_num * c3 + c4 #endif DO iz = 1, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx bx(ix, iy, iz) = bx1(ix, iy, iz) & + (k3z(ix, iy, iz) - k3z(ix, iy-1, iz)) * dt6 / dyb(iy) & - (k3y(ix, iy, iz) - k3y(ix, iy, iz-1)) * dt6 / dzb(iz) END DO END DO END DO DO iz = 1, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx by(ix, iy, iz) = by1(ix, iy, iz) & + (-k3z(ix, iy, iz) + k3z(ix-1, iy, iz)) * dt6 / dxb(ix) & + (k3x(ix, iy, iz) - k3x(ix, iy, iz-1)) * dt6 / dzb(iz) END DO END DO END DO DO iz = 0, nz DO iy = 1, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 1, nx bz(ix, iy, iz) = bz1(ix, iy, iz) & + (k3y(ix, iy, iz) - k3z(ix-1, iy, iz)) * dt6 / dxb(ix) & - (k3x(ix, iy, iz) - k3x(ix, iy-1, iz)) * dt6 / dyb(iy) END DO END DO END DO CALL bfield_bcs DO iz = 1, nz izm = iz - 1 DO iy = 1, ny iym = iy - 1 DO ix = 1, nx ixm = ix - 1 energy(ix, iy, iz) = energy(ix, iy, iz) & + (c1(ix, iy, iz) + c1(ixm, iy, iz) & + c1(ix, iym, iz) + c1(ix, iy, izm) & + c1(ixm, iym, iz) + c1(ixm, iy, izm) & + c1(ix, iym, izm) + c1(ixm, iym, izm)) & * dt6 / (8.0_num * rho(ix, iy, iz)) END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 ! jx at E3(i, j) jx1 = (bz(ix, iyp, iz) - bz(ix, iy, iz)) / dyc(iy) & - (by(ix, iy, izp) - by(ix, iy, iz)) / dzc(iz) ! jx at E3(i+1, j) jx2 = (bz(ixp, iyp, iz) - bz(ixp, iy, iz)) / dyc(iy) & - (by(ixp, iy, izp) - by(ixp, iy, iz)) / dzc(iz) ! jy at E2(i, j) jy1 = (bx(ix, iy, izp) - bx(ix, iy, iz)) / dzc(iz) & - (bz(ixp, iy, iz) - bz(ix, iy, iz)) / dxc(ix) ! jy at E2(i, j+1) jy2 = (bx(ix, iyp, izp) - bx(ix, iyp, iz)) / dzc(iz) & - (bz(ixp, iyp, iz) - bz(ix, iyp, iz)) / dxc(ix) ! 
jz at E1(i, j) jz1 = (by(ixp, iy, iz) - by(ix, iy, iz)) / dxc(ix) & - (bx(ix, iyp, iz) - bx(ix, iy, iz)) / dyc(iy) ! jz at E1(i, j) jz2 = (by(ixp, iy, izp) - by(ix, iy, izp)) / dxc(ix) & - (bx(ix, iyp, izp) - bx(ix, iy, izp)) / dyc(iy) jx_r(ix, iy, iz) = (jx1 + jx2) / 2.0_num jy_r(ix, iy, iz) = (jy1 + jy2) / 2.0_num jz_r(ix, iy, iz) = (jz1 + jz2) / 2.0_num END DO END DO END DO CALL energy_bcs DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx w1 = dt6 * dxc(ix) * dyc(iy) * dzc(iz) * c1(ix, iy, iz) IF ((ix == 0) .OR. (ix == nx)) THEN w1 = w1 * 0.5_num END IF IF ((iy == 0) .OR. (iy == ny)) THEN w1 = w1 * 0.5_num END IF IF ((iz == 0) .OR. (iz == nz)) THEN w1 = w1 * 0.5_num END IF total_ohmic_heating = total_ohmic_heating + w1 END DO END DO END DO ! Once more to get j_perp and j_par correct CALL rkstep DEALLOCATE(k1x, k2x, k3x, k4x, k1y, k2y, k3y, k4y, k1z, k2z, k3z, k4z) DEALLOCATE(c1, c2, c3, c4) END SUBROUTINE resistive_effects ! calculates 'k' values from b[xyz]1 values SUBROUTINE rkstep REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: jx, jy, jz REAL(num) :: jx1, jy1, jz1, jx2, jy2, jz2 REAL(num) :: bxv, byv, bzv REAL(num) :: magn_b REAL(num) :: j_par_x, j_par_y, j_par_z REAL(num) :: j_perp_x, j_perp_y, j_perp_z REAL(num) :: magn_j_perp, magn_j_par ALLOCATE(jx(-1:nx+1, -1:ny+1, -1:nz+1), & jy(-1:nx+1, -1:ny+1, -1:nz+1), jz(-1:nx+1, -1:ny+1, -1:nz+1)) DO iz = -1, nz+1 izp = iz + 1 DO iy = -1, ny+1 iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = -1, nx+1 ixp = ix + 1 ! jx at E3(i, j) jx1 = (bz(ix, iyp, iz) - bz(ix, iy, iz)) / dyc(iy) & - (by(ix, iy, izp) - by(ix, iy, iz)) / dzc(iz) ! jx at E3(i+1, j) jx2 = (bz(ixp, iyp, iz) - bz(ixp, iy, iz)) / dyc(iy) & - (by(ixp, iy, izp) - by(ixp, iy, iz)) / dzc(iz) ! jy at E2(i, j) jy1 = (bx(ix, iy, izp) - bx(ix, iy, iz)) / dzc(iz) & - (bz(ixp, iy, iz) - bz(ix, iy, iz)) / dxc(ix) ! jy at E2(i, j+1) jy2 = (bx(ix, iyp, izp) - bx(ix, iyp, iz)) / dzc(iz) & - (bz(ixp, iyp, iz) - bz(ix, iyp, iz)) / dxc(ix) ! 
jz at E1(i, j) jz1 = (by(ixp, iy, iz) - by(ix, iy, iz)) / dxc(ix) & - (bx(ix, iyp, iz) - bx(ix, iy, iz)) / dyc(iy) ! jz at E1(i, j) jz2 = (by(ixp, iy, izp) - by(ix, iy, izp)) / dxc(ix) & - (bx(ix, iyp, izp) - bx(ix, iy, izp)) / dyc(iy) jx(ix, iy, iz) = (jx1 + jx2) / 2.0_num jy(ix, iy, iz) = (jy1 + jy2) / 2.0_num jz(ix, iy, iz) = (jz1 + jz2) / 2.0_num END DO END DO END DO IF (.NOT. cowling_resistivity) THEN ! Use simple flux calculation DO iz = 0, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx flux_x(ix, iy, iz) = -jx(ix, iy, iz) & * eta(ix, iy, iz) * dxc(ix) / 2.0_num flux_y(ix, iy, iz) = -jy(ix, iy, iz) & * eta(ix, iy, iz) * dyc(iy) / 2.0_num flux_z(ix, iy, iz) = -jz(ix, iy, iz) & * eta(ix, iy, iz) * dzc(iz) / 2.0_num ! This isn't really curlb. It's actually heat flux curlb(ix, iy, iz) = eta(ix, iy, iz) & * (jx(ix, iy, iz)**2 + jy(ix, iy, iz)**2 + jz(ix, iy, iz)**2) END DO END DO END DO ELSE ! Use partially ionised flux calculation DO iz = 0, nz DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 ! B at vertices bxv = (bx(ix, iy, iz) + bx(ix, iyp, iz) + bx(ix, iy, izp) & + bx(ix, iyp, izp)) / 4.0_num byv = (by(ix, iy, iz) + by(ixp, iy, iz) + by(ix, iy, izp) & + by(ixp, iy, izp)) / 4.0_num bzv = (bz(ix, iy, iz) + bz(ixp, iy, iz) + bz(ix, iyp, iz) & + bz(ixp, iyp, iz)) / 4.0_num magn_b = bxv**2 + byv**2 + bzv**2 ! Calculate parallel and perpendicular currents j_par_x = (jx(ix, iy, iz) * bxv + jy(ix, iy, iz) * byv & + jz(ix, iy, iz) * bzv) * bxv / MAX(magn_b, none_zero) j_par_y = (jx(ix, iy, iz) * bxv + jy(ix, iy, iz) * byv & + jz(ix, iy, iz) * bzv) * byv / MAX(magn_b, none_zero) j_par_z = (jx(ix, iy, iz) * bxv + jy(ix, iy, iz) * byv & + jz(ix, iy, iz) * bzv) * bzv / MAX(magn_b, none_zero) ! If b = 0 then there is no parallel current IF (magn_b .LT. none_zero) THEN j_par_x = 0.0_num j_par_y = 0.0_num j_par_z = 0.0_num END IF ! 
Calculate perpendicular current j_perp_x = jx(ix, iy, iz) - j_par_x j_perp_y = jy(ix, iy, iz) - j_par_y j_perp_z = jz(ix, iy, iz) - j_par_z magn_j_par = SQRT(j_par_x**2 + j_par_y**2 + j_par_z**2) magn_j_perp = SQRT(j_perp_x**2 + j_perp_y**2 + j_perp_z**2) parallel_current(ix, iy, iz) = magn_j_par perp_current(ix, iy, iz) = magn_j_perp ! This isn't really curlb. It's actually heat flux curlb(ix, iy, iz) = eta(ix, iy, iz) * magn_j_par**2 & + (eta_perp(ix, iy, iz) + eta(ix, iy, iz)) * magn_j_perp**2 flux_x(ix, iy, iz) = -((j_par_x * eta(ix, iy, iz) & + j_perp_x * (eta_perp(ix, iy, iz) + eta(ix, iy, iz))) & * dxc(ix) / 2.0_num) flux_y(ix, iy, iz) = -((j_par_y * eta(ix, iy, iz) & + j_perp_y * (eta_perp(ix, iy, iz) + eta(ix, iy, iz))) & * dyc(iy) / 2.0_num) flux_z(ix, iy, iz) = -((j_par_z * eta(ix, iy, iz) & + j_perp_z * (eta_perp(ix, iy, iz) + eta(ix, iy, iz)))) END DO END DO END DO END IF DEALLOCATE (jx, jy, jz) END SUBROUTINE rkstep SUBROUTINE store_boundary_dv REAL(num) :: dvx, dvy, dvz IF (xbc_right == BC_OPEN .AND. right == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO iy = -2, ny+2 dvx = 2.0_num * (vx(nx, iy, iz) - vx1(nx, iy, iz)) dvy = 2.0_num * (vy(nx, iy, iz) - vy1(nx, iy, iz)) dvz = 2.0_num * (vz(nx, iy, iz) - vz1(nx, iy, iz)) dv_right(iy, iz) = SQRT(dvx**2 + dvy**2 + dvz**2) END DO END DO END IF IF (xbc_left == BC_OPEN .AND. left == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO iy = -2, ny+2 dvx = 2.0_num * (vx(0, iy, iz) - vx1(0, iy, iz)) dvy = 2.0_num * (vy(0, iy, iz) - vy1(0, iy, iz)) dvz = 2.0_num * (vz(0, iy, iz) - vz1(0, iy, iz)) dv_left(iy, iz) = SQRT(dvx**2 + dvy**2 + dvz**2) END DO END DO END IF IF (ybc_up == BC_OPEN .AND. up == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO ix = -2, nx+2 dvx = 2.0_num * (vx(ix, ny, iz) - vx1(ix, ny, iz)) dvy = 2.0_num * (vy(ix, ny, iz) - vy1(ix, ny, iz)) dvz = 2.0_num * (vz(ix, ny, iz) - vz1(ix, ny, iz)) dv_up(ix, iz) = SQRT(dvx**2 + dvy**2 + dvz**2) END DO END DO END IF IF (ybc_down == BC_OPEN .AND. 
down == MPI_PROC_NULL) THEN DO iz = -2, nz+2 DO ix = -2, nx+2 dvx = 2.0_num * (vx(ix, 0, iz) - vx1(ix, 0, iz)) dvy = 2.0_num * (vy(ix, 0, iz) - vy1(ix, 0, iz)) dvz = 2.0_num * (vz(ix, 0, iz) - vz1(ix, 0, iz)) dv_down(ix, iz) = SQRT(dvx**2 + dvy**2 + dvz**2) END DO END DO END IF IF (zbc_back == BC_OPEN .AND. back == MPI_PROC_NULL) THEN DO iy = -2, ny+2 DO ix = -2, nx+2 dvx = 2.0_num * (vx(ix, iy, nz) - vx1(ix, iy, nz)) dvy = 2.0_num * (vy(ix, iy, nz) - vy1(ix, iy, nz)) dvz = 2.0_num * (vz(ix, iy, nz) - vz1(ix, iy, nz)) dv_back(ix, iy) = SQRT(dvx**2 + dvy**2 + dvz**2) END DO END DO END IF IF (zbc_front == BC_OPEN .AND. front == MPI_PROC_NULL) THEN DO iy = -2, ny+2 DO ix = -2, nx+2 dvx = 2.0_num * (vx(ix, iy, 0) - vx1(ix, iy, 0)) dvy = 2.0_num * (vy(ix, iy, 0) - vy1(ix, iy, 0)) dvz = 2.0_num * (vz(ix, iy, 0) - vz1(ix, iy, 0)) dv_front(ix, iy) = SQRT(dvx**2 + dvy**2 + dvz**2) END DO END DO END IF END SUBROUTINE store_boundary_dv END MODULE lagran Lare3d/src/core/lare3d.f90000644 000765 000024 00000004556 11430520723 015504 0ustar00Tonystaff000000 000000 PROGRAM lare3d USE shared_data USE initial_conditions USE setup USE boundary USE openboundary USE diagnostics USE lagran USE remap USE mpi_routines USE welcome USE normalise USE eos USE neutral USE control IMPLICIT NONE INTEGER :: i = 0 CALL MPI_INIT(errcode) CALL before_control ! setup.F90 CALL user_normalisation ! control.f90 CALL control_variables ! control.f90 CALL set_output_dumps ! control.f90 CALL mpi_initialise ! mpi_routines.f90 CALL after_control ! setup.f90 ! CALL welcome_message ! welcome.f90 CALL setup_neutral ! neutral.f90 IF (.NOT. SI) THEN CALL normalise_neutral ! setup.f90 END IF CALL set_normalisation ! normalise.f90 CALL set_boundary_conditions ! boundary.f90 CALL open_files ! setup.f90 CALL grid ! setup.f90 IF (IAND(initial, IC_RESTART) .NE. 0) THEN CALL restart_data ! setup.f90 restart = .TRUE. END IF IF (IAND(initial, IC_NEW) .NE. 0) THEN CALL set_initial_conditions ! 
initial_conditions.f90 END IF ! Initial conditions, parameters etc. specified in SI IF (SI) THEN ! Normalise everything CALL normalise_code ! setup.f90 END IF CALL set_boundary_conditions ! boundary.f90 CALL boundary_conditions ! boundary.f90 CALL eta_calc ! lagran.f90 IF (include_neutrals) CALL neutral_fraction(eos_number) ! neutral.f90 IF (cowling_resistivity) CALL perpendicular_resistivity ! neutral.f90 ! IF (rank .EQ. 0) PRINT *, "Initial conditions setup OK. Running Code" CALL output_routines(i) ! diagnostics.f90 DO IF ((i >= nsteps .AND. nsteps >= 0) .OR. (time >= t_end)) EXIT i = i + 1 CALL eta_calc ! lagran.f90 CALL set_dt ! diagnostics.f90 CALL lagrangian_step ! lagran.f90 CALL eulerian_remap(i) ! remap.f90 IF (rke) CALL energy_correction ! diagnostics.f90 IF (any_open) THEN CALL open_bcs ! openboundary.f90 END IF CALL output_routines(i) ! diagnostics.f90 END DO ! IF (rank .EQ. 0) PRINT *, "Code Terminated normally" CALL mpi_close ! mpi_routines.f90 CALL close_files ! setup.f90 CALL MPI_FINALIZE(errcode) END PROGRAM lare3d Lare3d/src/core/mpi_routines.f90000644 000765 000024 00000012100 11406627300 017031 0ustar00Tonystaff000000 000000 MODULE mpi_routines USE shared_data IMPLICIT NONE PRIVATE PUBLIC :: mpi_initialise, mpi_close REAL(dbl) :: start_time, end_time CONTAINS SUBROUTINE mpi_initialise INTEGER :: ndims, dims(3) LOGICAL :: periods(3), reorder INTEGER :: starts(3), sizes(3), subsizes(3) CALL MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, errcode) ndims = 3 dims = (/ nprocz, nprocy, nprocx /) IF (MAX(dims(1), 1)*MAX(dims(2), 1)*MAX(dims(3), 1) .GT. nproc) THEN dims = 0 IF (rank .EQ. 0) THEN PRINT *, "Too many processors requested in override." PRINT *, "Reverting to automatic decomposition." PRINT *, "**" PRINT *, "" END IF END IF CALL MPI_DIMS_CREATE(nproc, ndims, dims, errcode) nprocx = dims(3) nprocy = dims(2) nprocz = dims(1) nx = nx_global / nprocx ny = ny_global / nprocy nz = nz_global / nprocz periods = .TRUE. reorder = .TRUE. 
IF (xbc_left == BC_OTHER) periods(3) = .FALSE. IF (ybc_up == BC_OTHER) periods(2) = .FALSE. IF (zbc_front == BC_OTHER) periods(1) = .FALSE. IF (xbc_left == BC_OPEN) periods(3) = .FALSE. IF (ybc_up == BC_OPEN) periods(2) = .FALSE. IF (zbc_front == BC_OPEN) periods(1) = .FALSE. IF (xbc_right == BC_OTHER) periods(3) = .FALSE. IF (ybc_down == BC_OTHER) periods(2) = .FALSE. IF (zbc_back == BC_OTHER) periods(1) = .FALSE. IF (xbc_right == BC_OPEN) periods(3) = .FALSE. IF (ybc_down == BC_OPEN) periods(2) = .FALSE. IF (zbc_back == BC_OPEN) periods(1) = .FALSE. CALL MPI_CART_CREATE(MPI_COMM_WORLD, ndims, dims, periods, & reorder, comm, errcode) CALL MPI_COMM_RANK(comm, rank, errcode) CALL MPI_CART_COORDS(comm, rank, 3, coordinates, errcode) CALL MPI_CART_SHIFT(comm, 2, 1, left, right, errcode) CALL MPI_CART_SHIFT(comm, 1, 1, down, up, errcode) CALL MPI_CART_SHIFT(comm, 0, 1, front, back, errcode) ! Create the subarray for this problem: subtype decribes where this ! process's data fits into the global picture. ! set up the starting point for my subgrid (assumes arrays start at 0) starts(1) = coordinates(3) * nx starts(2) = coordinates(2) * ny starts(3) = coordinates(1) * nz ! the grid sizes subsizes = (/ nx+1, ny+1, nz+1 /) sizes = (/ nx_global+1, ny_global+1, nz_global+1 /) ! set up and commit the subarray type CALL MPI_TYPE_CREATE_SUBARRAY(ndims, sizes, subsizes, starts, & MPI_ORDER_FORTRAN, mpireal, subtype, errcode) CALL MPI_TYPE_COMMIT(subtype, errcode) ! Calculate initial displacement value: ! 
nx, ny, nz, (xb, yb, zb, time) * size of float initialdisp = 12 + (nx_global + ny_global + 3) * num ALLOCATE(rho(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(energy(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(vx(-2:nx+2, -2:ny+2, -2:nz+2)) ALLOCATE(vy(-2:nx+2, -2:ny+2, -2:nz+2)) ALLOCATE(vz(-2:nx+2, -2:ny+2, -2:nz+2)) ALLOCATE(vx1(-2:nx+2, -2:ny+2, -2:nz+2)) ALLOCATE(vy1(-2:nx+2, -2:ny+2, -2:nz+2)) ALLOCATE(vz1(-2:nx+2, -2:ny+2, -2:nz+2)) ALLOCATE(bx(-2:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(by(-1:nx+2, -2:ny+2, -1:nz+2)) ALLOCATE(bz(-1:nx+2, -1:ny+2, -2:nz+2)) ALLOCATE(delta_ke(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(p_visc(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(eta(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(lambda_i(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(bzone(-1:nx+2, -1:ny+2, -1:nz+2)) ! shocked and resistive need to be larger to allow offset = 4 in shock_test ALLOCATE(cv(-1:nx+2, -1:ny+2, -1:nz+2), cv1(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(xc(-1:nx+2), xb(-2:nx+2), dxb(-1:nx+2), dxc(-1:nx+2)) ALLOCATE(yc(-1:ny+2), yb(-2:ny+2), dyb(-1:ny+2), dyc(-1:ny+2)) ALLOCATE(zc(-1:nz+2), zb(-2:nz+2), dzb(-1:nz+2), dzc(-1:nz+2)) ALLOCATE(grav(-1:nz+2)) ALLOCATE(jx_r(0:nx+1, 0:ny+1, 0:nz+1)) ALLOCATE(jy_r(0:nx+1, 0:ny+1, 0:nz+1)) ALLOCATE(jz_r(0:nx+1, 0:ny+1, 0:nz+1)) ALLOCATE(dv_left(-2:ny+2, -2:nz+2), dv_right(-2:ny+2, -2:nz+2)) ALLOCATE(dv_up(-2:nx+2, -2:nz+2), dv_down(-2:nx+2, -2:nz+2)) ALLOCATE(dv_back(-2:nx+2, -2:ny+2), dv_front(-2:nx+2, -2:ny+2)) IF (rank == 0) start_time = MPI_WTIME() p_visc = 0.0_num eta = 0.0_num END SUBROUTINE mpi_initialise SUBROUTINE mpi_close INTEGER :: seconds, minutes, hours, total IF (rank == 0) THEN end_time = MPI_WTIME() total = INT(end_time - start_time) seconds = MOD(total, 60) minutes = MOD(total / 60, 60) hours = total / 3600 WRITE(20, *) WRITE(20, '("runtime = ", i4, "h ", i2, "m ", i2, & & "s on ", i4, " process elements.")') hours, minutes, seconds, nproc END IF CALL MPI_BARRIER(comm, errcode) DEALLOCATE(rho, energy) DEALLOCATE(vx, vy, vz) DEALLOCATE(vx1, vy1, vz1) 
DEALLOCATE(bx, by, bz) DEALLOCATE(delta_ke, p_visc) DEALLOCATE(eta, lambda_i, bzone) DEALLOCATE(cv, cv1) DEALLOCATE(xc, xb, dxb, dxc) DEALLOCATE(yc, yb, dyb, dyc) DEALLOCATE(grav) DEALLOCATE(jx_r, jy_r, jz_r) END SUBROUTINE mpi_close END MODULE mpi_routines Lare3d/src/core/mpiboundary.f90000644 000765 000024 00000027240 11406627300 016660 0ustar00Tonystaff000000 000000 MODULE mpiboundary USE shared_data IMPLICIT NONE CONTAINS SUBROUTINE bfield_MPI CALL MPI_SENDRECV(bx(1:nx, 1:ny, 1:2), 2*nx*ny, mpireal, front, tag, & bx(1:nx, 1:ny, nz+1:nz+2), 2*nx*ny, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(bx(1:nx, 1:ny, nz-1:nz), 2*nx*ny, mpireal, back, tag, & bx(1:nx, 1:ny, -1:0), 2*nx*ny, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(by(1:nx, 1:ny, 1:2), 2*nx*ny, mpireal, front, tag, & by(1:nx, 1:ny, nz+1:nz+2), 2*nx*ny, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(by(1:nx, 1:ny, nz-1:nz), 2*nx*ny, mpireal, back, tag, & by(1:nx, 1:ny, -1:0), 2*nx*ny, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(bz(1:nx, 1:ny, 1:2), 2*nx*ny, mpireal, front, tag, & bz(1:nx, 1:ny, nz+1:nz+2), 2*nx*ny, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(bz(1:nx, 1:ny, nz-2:nz), 3*nx*ny, mpireal, back, tag, & bz(1:nx, 1:ny, -2:0), 3*nx*ny, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(bx(1:2, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, & bx(nx+1:nx+2, 1:ny, :), 2*ny*(nz+4), mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(bx(nx-2:nx, 1:ny, :), 3*ny*(nz+4), mpireal, right, tag, & bx(-2:0, 1:ny, :), 3*ny*(nz+4), mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(by(1:2, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, & by(nx+1:nx+2, 1:ny, :), 2*ny*(nz+4), mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(by(nx-1:nx, 1:ny, :), 2*ny*(nz+4), mpireal, right, tag, & by(-1:0, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, comm, & status, errcode) CALL 
MPI_SENDRECV(bz(1:2, 1:ny, :), 2*ny*(nz+5), mpireal, left, tag, & bz(nx+1:nx+2, 1:ny, :), 2*ny*(nz+5), mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(bz(nx-1:nx, 1:ny, :), 2*ny*(nz+5), mpireal, right, tag, & bz(-1:0, 1:ny, :), 2*ny*(nz+5), mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(bx(:, ny-1:ny, :), 2*(nx+5)*(nz+4), mpireal, up, tag, & bx(:, -1:0, :), 2*(nx+5)*(nz+4), mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(bx(:, 1:2, :), 2*(nx+5)*(nz+4), mpireal, down, tag, & bx(:, ny+1:ny+2, :), 2*(nx+5)*(nz+4), mpireal, up, tag, comm, & status, errcode) CALL MPI_SENDRECV(by(:, ny-2:ny, :), 3*(nx+4)*(nz+4), mpireal, up, tag, & by(:, -2:0, :), 3*(nx+4)*(nz+4), mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(by(:, 1:2, :), 2*(nx+4)*(nz+4), mpireal, down, tag, & by(:, ny+1:ny+2, :), 2*(nx+4)*(nz+4), mpireal, up, tag, comm, & status, errcode) CALL MPI_SENDRECV(bz(:, ny-1:ny, :), 2*(nx+4)*(nz+5), mpireal, up, tag, & bz(:, -1:0, :), 2*(nx+4)*(nz+5), mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(bz(:, 1:2, :), 2*(nx+4)*(nz+5), mpireal, down, tag, & bz(:, ny+1:ny+2, :), 2*(nx+4)*(nz+5), mpireal, up, tag, comm, & status, errcode) END SUBROUTINE bfield_MPI SUBROUTINE energy_MPI CALL MPI_SENDRECV(energy(1:nx, 1:ny, 1:2), 2*nx*ny, mpireal, front, tag, & energy(1:nx, 1:ny, nz+1:nz+2), 2*nx*ny, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(energy(1:nx, 1:ny, nz-1:nz), 2*nx*ny, mpireal, back, & tag, energy(1:nx, 1:ny, -1:0), 2*nx*ny, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(energy(1:2, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, & energy(nx+1:nx+2, 1:ny, :), 2*ny*(nz+4), mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(energy(nx-1:nx, 1:ny, :), 2*ny*(nz+4), mpireal, right, & tag, energy(-1:0, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(energy(:, ny-1:ny, :), 2*(nx+4)*(nz+4), mpireal, up, & tag, energy(:, 
-1:0, :), 2*(nx+4)*(nz+4), mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(energy(:, 1:2, :), 2*(nx+4)*(nz+4), mpireal, down, tag, & energy(:, ny+1:ny+2, :), 2*(nx+4)*(nz+4), mpireal, up, tag, comm, & status, errcode) END SUBROUTINE energy_MPI SUBROUTINE velocity_MPI INTEGER :: x_extent, y_extent, z_extent INTEGER :: xy_buf, xz_buf, yz_buf x_extent = nx + 5 y_extent = ny + 5 z_extent = nz + 5 xy_buf = x_extent * y_extent xz_buf = x_extent * z_extent yz_buf = y_extent * z_extent CALL MPI_SENDRECV(vx(:, :, 1:2), 2*xy_buf, mpireal, front, tag, & vx(:, :, nz+1:nz+2), 2*xy_buf, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx(:, :, nz-2:nz), 3*xy_buf, mpireal, back, tag, & vx(:, :, -2:0), 3*xy_buf, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy(:, :, 1:2), 2*xy_buf, mpireal, front, tag, & vy(:, :, nz+1:nz+2), 2*xy_buf, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy(:, :, nz-2:nz), 3*xy_buf, mpireal, back, tag, & vy(:, :, -2:0), 3*xy_buf, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz(:, :, 1:2), 2*xy_buf, mpireal, front, tag, & vz(:, :, nz+1:nz+2), 2*xy_buf, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz(:, :, nz-2:nz), 3*xy_buf, mpireal, back, tag, & vz(:, :, -2:0), 3*xy_buf, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx(1:2, :, :), 2*yz_buf, mpireal, left, tag, & vx(nx+1:nx+2, :, :), 2*yz_buf, mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx(nx-2:nx, :, :), 3*yz_buf, mpireal, right, tag, & vx(-2:0, :, :), 3*yz_buf, mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy(1:2, :, :), 2*yz_buf, mpireal, left, tag, & vy(nx+1:nx+2, :, :), 2*yz_buf, mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy(nx-2:nx, :, :), 3*yz_buf, mpireal, right, tag, & vy(-2:0, :, :), 3*yz_buf, mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz(1:2, :, :), 2*yz_buf, mpireal, left, tag, & vz(nx+1:nx+2, :, :), 
2*yz_buf, mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz(nx-2:nx, :, :), 3*yz_buf, mpireal, right, tag, & vz(-2:0, :, :), 3*yz_buf, mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx(:, ny-2:ny, :), 3*xz_buf, mpireal, up, tag, & vx(:, -2:0, :), 3*xz_buf, mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx(:, 1:2, :), 2*xz_buf, mpireal, down, tag, & vx(:, ny+1:ny+2, :), 2*xz_buf, mpireal, up, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy(:, ny-2:ny, :), 3*xz_buf, mpireal, up, tag, & vy(:, -2:0, :), 3*xz_buf, mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy(:, 1:2, :), 2*xz_buf, mpireal, down, tag, & vy(:, ny+1:ny+2, :), 2*xz_buf, mpireal, up, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz(:, ny-2:ny, :), 3*xz_buf, mpireal, up, tag, & vz(:, -2:0, :), 3*xz_buf, mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz(:, 1:2, :), 2*xz_buf, mpireal, down, tag, & vz(:, ny+1:ny+2, :), 2*xz_buf, mpireal, up, tag, comm, & status, errcode) END SUBROUTINE velocity_MPI SUBROUTINE remap_v_MPI INTEGER :: x_extent, y_extent, z_extent INTEGER :: xy_buf, xz_buf, yz_buf x_extent = nx + 5 y_extent = ny + 5 z_extent = nz + 5 xy_buf = x_extent * y_extent xz_buf = x_extent * z_extent yz_buf = y_extent * z_extent CALL MPI_SENDRECV(vx1(:, :, 1:2), 2*xy_buf, mpireal, front, tag, & vx1(:, :, nz+1:nz+2), 2*xy_buf, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx1(:, :, nz-2:nz), 3*xy_buf, mpireal, back, tag, & vx1(:, :, -2:0), 3*xy_buf, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy1(:, :, 1:2), 2*xy_buf, mpireal, front, tag, & vy1(:, :, nz+1:nz+2), 2*xy_buf, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy1(:, :, nz-2:nz), 3*xy_buf, mpireal, back, tag, & vy1(:, :, -2:0), 3*xy_buf, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz1(:, :, 1:2), 2*xy_buf, mpireal, front, tag, & vz1(:, :, nz+1:nz+2), 2*xy_buf, mpireal, back, tag, comm, & 
status, errcode) CALL MPI_SENDRECV(vz1(:, :, nz-2:nz), 3*xy_buf, mpireal, back, tag, & vz1(:, :, -2:0), 3*xy_buf, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx1(1:2, :, :), 2*yz_buf, mpireal, left, tag, & vx1(nx+1:nx+2, :, :), 2*yz_buf, mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx1(nx-2:nx, :, :), 3*yz_buf, mpireal, right, tag, & vx1(-2:0, :, :), 3*yz_buf, mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy1(1:2, :, :), 2*yz_buf, mpireal, left, tag, & vy1(nx+1:nx+2, :, :), 2*yz_buf, mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy1(nx-2:nx, :, :), 3*yz_buf, mpireal, right, tag, & vy1(-2:0, :, :), 3*yz_buf, mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz1(1:2, :, :), 2*yz_buf, mpireal, left, tag, & vz1(nx+1:nx+2, :, :), 2*yz_buf, mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz1(nx-2:nx, :, :), 3*yz_buf, mpireal, right, tag, & vz1(-2:0, :, :), 3*yz_buf, mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx1(:, ny-2:ny, :), 3*xz_buf, mpireal, up, tag, & vx1(:, -2:0, :), 3*xz_buf, mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(vx1(:, 1:2, :), 2*xz_buf, mpireal, down, tag, & vx1(:, ny+1:ny+2, :), 2*xz_buf, mpireal, up, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy1(:, ny-2:ny, :), 3*xz_buf, mpireal, up, tag, & vy1(:, -2:0, :), 3*xz_buf, mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(vy1(:, 1:2, :), 2*xz_buf, mpireal, down, tag, & vy1(:, ny+1:ny+2, :), 2*xz_buf, mpireal, up, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz1(:, ny-2:ny, :), 3*xz_buf, mpireal, up, tag, & vz1(:, -2:0, :), 3*xz_buf, mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(vz1(:, 1:2, :), 2*xz_buf, mpireal, down, tag, & vz1(:, ny+1:ny+2, :), 2*xz_buf, mpireal, up, tag, comm, & status, errcode) END SUBROUTINE remap_v_MPI SUBROUTINE density_MPI CALL MPI_SENDRECV(rho(1:nx, 1:ny, 1:2), 2*nx*ny, mpireal, front, tag, & rho(1:nx, 
1:ny, nz+1:nz+2), 2*nx*ny, mpireal, back, tag, comm, & status, errcode) CALL MPI_SENDRECV(rho(1:nx, 1:ny, nz-1:nz), 2*nx*ny, mpireal, back, tag, & rho(1:nx, 1:ny, -1:0), 2*nx*ny, mpireal, front, tag, comm, & status, errcode) CALL MPI_SENDRECV(rho(1:2, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, & rho(nx+1:nx+2, 1:ny, :), 2*ny*(nz+4), mpireal, right, tag, comm, & status, errcode) CALL MPI_SENDRECV(rho(nx-1:nx, 1:ny, :), 2*ny*(nz+4), mpireal, right, tag, & rho(-1:0, 1:ny, :), 2*ny*(nz+4), mpireal, left, tag, comm, & status, errcode) CALL MPI_SENDRECV(rho(:, ny-1:ny, :), 2*(nx+4)*(nz+4), mpireal, up, tag, & rho(:, -1:0, :), 2*(nx+4)*(nz+4), mpireal, down, tag, comm, & status, errcode) CALL MPI_SENDRECV(rho(:, 1:2, :), 2*(nx+4)*(nz+4), mpireal, down, tag, & rho(:, ny+1:ny+2, :), 2*(nx+4)*(nz+4), mpireal, up, tag, comm, & status, errcode) END SUBROUTINE density_MPI END MODULE mpiboundary Lare3d/src/core/neutral.f90000644 000765 000024 00000017655 11430520723 016010 0ustar00Tonystaff000000 000000 ! All the subroutines in this module are for the partially ionised flux ! emergence simulations; see Leake & Arber, 2006 MODULE neutral USE shared_data USE boundary USE eos IMPLICIT NONE PRIVATE PUBLIC :: perpendicular_resistivity, newton_relax, & neutral_fraction, setup_neutral, get_neutral, get_energy CONTAINS SUBROUTINE setup_neutral ! Ion neutral collision cross section(m^2) REAL(num) :: sigma_in = 5.0e-19_num IF (include_neutrals) THEN ALLOCATE(xi_n(-1:nx+2, -1:ny+2, -1:nz+2)) xi_n = 0.0_num END IF IF (cowling_resistivity) THEN ALLOCATE(eta_perp(-1:nx+2, -1:ny+2, -1:nz+2)) ALLOCATE(parallel_current(0:nx, 0:ny, 0:nz)) ALLOCATE(perp_current(0:nx, 0:ny, 0:nz)) END IF ! Ionisation potential of hydrogen(J) ionise_pot = ionise_pot_0 ! Temperature of the photospheric radiation field tr = 6400.0_num ! Calculate fbar^(2 / 3) in (k^-1 m^-2) f_bar = pi * (me_0 / h_0) * (kb_0 / h_0) f_bar = SQRT(2.0_num) * f_bar**(3.0_num / 2.0_num) ! Calculate tbar in (K) t_bar = ionise_pot / kb_0 ! 
Calculate rbar in (kg^-1) mbar = mh * mf r_bar = 4.0_num / mbar ! Calculate eta_bar in (m^4 / (k s kg^2)) eta_bar = 2.0_num * mbar & / (SQRT(16.0_num * kb_0 / (pi * mbar)) * sigma_in) END SUBROUTINE setup_neutral SUBROUTINE perpendicular_resistivity ! This subroutine calculates the cross field resistivity at the current ! temperature. REAL(num) :: f, xi_v, bxv, byv, bzv, bfieldsq, rho_v, t_v, T INTEGER :: ixp, iyp, izp DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx ixp = ix + 1 iyp = iy + 1 izp = iz + 1 ! Get the vertex density rho_v = rho(ix, iy, iz) * cv(ix, iy, iz) & + rho(ixp, iy , iz ) * cv(ixp, iy , iz ) & + rho(ix , iyp, iz ) * cv(ix , iyp, iz ) & + rho(ixp, iyp, iz ) * cv(ixp, iyp, iz ) & + rho(ix , iy , izp) * cv(ix , iy , izp) & + rho(ixp, iy , izp) * cv(ixp, iy , izp) & + rho(ix , iyp, izp) * cv(ix , iyp, izp) & + rho(ixp, iyp, izp) * cv(ixp, iyp, izp) rho_v = rho_v / (cv(ix, iy, iz) + cv(ixp, iy, iz) & + cv(ix, iyp, iz ) + cv(ixp, iyp, iz ) & + cv(ix, iy , izp) + cv(ixp, iy , izp) & + cv(ix, iyp, izp) + cv(ixp, iyp, izp)) ! Get the vertex magnetic field bxv = (bx(ix, iy, iz) + bx(ix, iyp, iz) + bx(ix, iy, izp) & + bx(ix, iyp, izp)) / 4.0_num byv = (by(ix, iy, iz) + by(ixp, iy, iz) + by(ix, iy, izp) & + by(ixp, iy, izp)) / 4.0_num bzv = (bz(ix, iy, iz) + bz(ixp, iy, iz) + bz(ix, iyp, iz) & + bz(ixp, iyp, iz)) / 4.0_num bfieldsq = bxv**2 + byv**2 + bzv**2 t_v = 0.0_num ! Get the vertex temperature DO izp = iz, iz + 1 DO iyp = iy, iy + 1 DO ixp = ix, ix + 1 CALL get_temp(rho(ixp, iyp, izp), energy(ixp, iyp, izp), & eos_number, ixp, iyp, izp, T) t_v = t_v + T END DO END DO END DO t_v = t_v / 8.0_num xi_v = get_neutral(t_v, rho_v) f = MAX(1.0_num - xi_v, none_zero) IF (f .GT. 0) THEN eta_perp(ix, iy, iz) = eta_bar * xi_v / f * bfieldsq & / rho_v**2 / SQRT(t_v) ELSE eta_perp(ix, iy, iz) = 0.0_num END IF ! 
eta_perp(ix, iy, iz) = MIN(eta_perp(ix, iy, iz), 0.0001_num) END DO END DO END DO END SUBROUTINE perpendicular_resistivity FUNCTION get_neutral(t_v, rho_v) REAL(num), INTENT(IN) :: t_v, rho_v REAL(num) :: get_neutral REAL(num) :: bof, r bof = 1.0_num / (f_bar * tr * SQRT(t_v)) & * EXP((0.25_num * (t_v / tr - 1.0_num) + 1.0_num) & * T_bar / t_v) r = 0.5_num * (-1.0_num + SQRT(1.0_num + r_bar * rho_v * bof)) get_neutral = r / (1.0_num + r) END FUNCTION get_neutral SUBROUTINE neutral_fraction(material) INTEGER, INTENT(IN) :: material REAL(num) :: t, rho0, e0, dx, x REAL(num), DIMENSION(2) :: ta, fa, xi_a REAL(num) :: ionise_pot_local INTEGER :: loop IF (material .EQ. EOS_ION) THEN ionise_pot_local = ionise_pot ELSE ionise_pot_local = 0.0_num END IF ! variable bof is b / f in the original version DO iz = -1, nz+2 DO iy = -1, ny+2 DO ix = -1, nx+2 rho0 = rho(ix, iy, iz) e0 = energy(ix, iy, iz) ta = (gamma - 1.0_num) & * (/ MAX((e0 - ionise_pot_local) / 2.0_num, none_zero), e0 /) IF (ta(1) > ta(2)) THEN PRINT *, "Temperature bounds problem", ta STOP END IF dx = ta(2) - ta(1) T = ta(1) DO loop = 1, 100 dx = dx / 2.0_num x = T + dx xi_a(1) = get_neutral(x, rho0) fa(1) = x - (gamma - 1.0_num) * (e0 & - (1.0_num - xi_a(1)) * ionise_pot_local) / (2.0_num - xi_a(1)) IF (fa(1) <= 0.0_num) T = x IF (ABS(dx) < 1.e-8_num .OR. fa(1) == 0.0_num) EXIT END DO xi_n(ix, iy, iz) = get_neutral(t, rho0) END DO END DO END DO END SUBROUTINE neutral_fraction SUBROUTINE get_energy(rho_in, temp_in, m_in, ix, iy, iz, en_out) ! this routine is only used by initial conditions and must be in SI ! with variables as defined with kb etc. The routine needs to be here to call ! get_neutral above REAL(num), INTENT(IN) :: rho_in, temp_in INTEGER, INTENT(IN) :: m_in, ix, iy, iz REAL(num), INTENT(OUT) :: en_out REAL(num) :: xi_local, bof, r IF (m_in .EQ. EOS_IDEAL) THEN en_out = temp_in * kb / ((gamma - 1.0_num) * mbar / 2.0_num) RETURN END IF IF (m_in .EQ. EOS_PI) THEN ! 
Since we can't guarantee that the ionisation fraction already ! calculated is correct here, calculate it straight from the temperature xi_local = get_neutral(temp_in, rho_in) en_out = (kb * temp_in * (2.0_num - xi_local)) & / (mbar * (gamma - 1.0_num)) RETURN END IF IF (m_in .EQ. EOS_ION) THEN ! Since we can't guarantee that the ionisation fraction already ! calculated is correct here, calculate it straight from the temperature xi_local = get_neutral(temp_in, rho_in) en_out = (kb * temp_in * (2.0_num - xi_local) & + (1.0_num - xi_local) * ionise_pot * (gamma - 1.0_num)) & / (mbar * (gamma - 1.0_num)) RETURN END IF END SUBROUTINE get_energy SUBROUTINE newton_relax !!$ INTEGER, DIMENSION(1) :: ref_index, z0(1) = 1 !!$ LOGICAL :: first_call = .TRUE., run_loop = .TRUE. !!$ !!$ ! This should only be run above the photosphere so first call sets up !!$ ! the lowest value of iz to use if at all, the -2 is due to zc starting !!$ ! at -1 !!$ IF (first_call) THEN !!$ z0 = MINLOC(ABS(zc - 0.0_num)) - 2 !!$ ! This process doesn't have any cells in the corona !!$ IF (z0(1) > nz) run_loop = .FALSE. !!$ IF (z0(1) < 1) z0(1) = 1 ! only need to run over the internal domain !!$ first_call = .FALSE. !!$ END IF !!$ !!$ ! For every point need to find the reference density value and hence !!$ ! the tau and temperature !!$ IF (run_loop) THEN !!$ DO iz = z0(1), nz !!$ DO iy = 1, ny !!$ DO ix = 1, nx !!$ ! the 2 is subtracted due to rho_ref starting at -1 !!$ ref_index = MINLOC(ABS(rho(ix, iy) - rho_ref)) - 2 !!$ energy(ix, iy) = (energy(ix, iy) + dt / tau_ref(ref_index(1)) * & !!$ T_ref(ref_index(1)) / (gamma - 1.0_num)) & !!$ / (1.0_num + dt / tau_ref(ref_index(1))) !!$ END DO !!$ END DO !!$ END DO !!$ END IF !!$ !!$ CALL energy_bcs END SUBROUTINE newton_relax END MODULE neutral Lare3d/src/core/normalise.f90000644 000765 000024 00000010636 11430520723 016317 0ustar00Tonystaff000000 000000 !** ! This module contains the conversion factors between normalised ! 
and internal code units !** MODULE normalise USE constants USE shared_data IMPLICIT NONE SAVE ! These are the real SI physical constants ! Permiability of free space REAL(num), PARAMETER :: mu0_0 = 4.0e-7_num * pi ! Boltzmann's Constant REAL(num), PARAMETER :: kb_0 = 1.3806504e-23_num ! Mass of hydrogen ion REAL(num), PARAMETER :: mh_0 = 1.67262158e-27_num ! Mass of electron REAL(num), PARAMETER :: me_0 = 9.10938188e-31_num ! Planck's constant REAL(num), PARAMETER :: h_0 = 6.626068e-34_num ! Ionisation potential of hydrogen in J REAL(num), PARAMETER :: ionise_pot_0 = 2.17870364e-18_num ! These contain the correct value for the constant ! at the point where the code is running ! Permiability of free space REAL(num) :: mu0 = mu0_0 ! Boltzmann's Constant REAL(num) :: kb = kb_0 ! Mass of hydrogen ion REAL(num) :: mh = mh_0 ! Average mass of all ions in proton masses REAL(num) :: mf ! Average mass of a an ion REAL(num) :: mbar ! Magnetic field conversion factor (in T) REAL(num) :: B0 = 1.0_num ! Specific energy density conversion factor in K REAL(num) :: energy0 = 1.0_num ! Velocity conversion factor in m / s REAL(num) :: vel0 = 1.0_num ! Density conversion factor in kgm^{ - 3} REAL(num) :: rho0 = 1.0_num ! Time conversion factor in units of s REAL(num) :: t0 = 1.0_num ! Length conversion factor in m REAL(num) :: L0 = 1.0_num ! Current conversion factor in A REAL(num) :: j0 = 1.0_num ! Temperature conversion factor REAL(num) :: temp0 = 1.0_num ! Pressure Conversion factor REAL(num) :: pressure0 = 1.0_num ! These conversion factors are used to convert additional variables ! They are locked once the basic conversion variables are set ! Gravity conversion factor in ms^{ - 2} REAL(num) :: grav0 = 1.0_num ! Viscosity conversion factor in m^2 / s REAL(num) :: visc0 = 1.0_num ! Reistivity conversion factor in m^2 / s REAL(num) :: res0 = 1.0_num ! Thermal conductivity conversion factor in kg / m / s REAL(num) :: kappa0 = 1.0_num CONTAINS SUBROUTINE set_normalisation ! 
Remember that the way to set the normalisations is to set ! B0 the normalising magnetic field (in Tesla) ! L0 the normalising length (in m) ! Rho0 the normalising density (in kgm^{ - 3}) ! The code will generate the rest temperature_100mk = 1.e8_num IF (.NOT. SI) THEN ! Assuming you have set B0, RHO0 and L0 correctly then this ! will calculate the normalised thermal conductivity ! The temperature dependence is added later mbar = mh * mf energy0 = B0**2 / (mu0 * rho0) kappa0 = energy0**(3.0_num / 2.0_num) * rho0 * L0 & / (mbar / kb * energy0)**(7.0_num / 2.0_num) kappa_0 = 1.e-11_num / kappa0 !find the normalised temperature corresponding to 100MK temp0 = (mbar / kb) * B0**2 / (mu0 * rho0) ! Temperature in K temperature_100mk = 1.e8_num / temp0 ! If not running as an SI code then force normalisation off ! Ignore any values read from the input deck B0 = 1.0_num rho0 = 1.0_num L0 = 1.0_num ! Set the constants to one as well, so that ! you can use them in the main code mu0 = 1.0_num kb = 1.0_num mh = 1.0_num mf = 1.0_num END IF ! Calculate the derived quantities CALL derived_quantities END SUBROUTINE set_normalisation SUBROUTINE derived_quantities ! Average ion mass mbar = mh * mf vel0 = B0 / SQRT(mu0 * rho0) ! Velocity energy0 = vel0**2 ! Specific energy density t0 = L0 / vel0 ! Time ! Put code in here to normalise any derived quantities grav0 = vel0**2 / L0 ! g in kgms^ - 2 ! viscosity (Input as inverse Reynolds so L0 * VEL0 * RHO0 not needed) visc0 = 1.0_num ! resistivity (Input as inverse Lundquist so L0 * VEL0 * MU0 not needed) res0 = 1.0_num pressure0 = B0**2 / mu0 ! Pressure temp0 = mbar * pressure0 / (kb * rho0) ! Temperature in K j0 = b0 / (L0 * mu0) ! 
Thermal conductivity kappa0 = energy0**(3.0_num / 2.0_num) * rho0 * L0 & / (mbar / kb * energy0)**(7.0_num / 2.0_num) END SUBROUTINE derived_quantities END MODULE normalise Lare3d/src/core/openboundary.f90000644 000765 000024 00000051050 11406627300 017030 0ustar00Tonystaff000000 000000 MODULE openboundary USE shared_data IMPLICIT NONE REAL(num) :: direction, pfar, rhofar, uxfar, uyfar, uzfar REAL(num) :: bxfar, byfar, bzfar, pbc, vnorm REAL(num), DIMENSION(0:1) :: vxbc, vybc, vzbc REAL(num), DIMENSION(0:1) :: bxbc, bybc, bzbc REAL(num), DIMENSION(0:1) :: rbc, ebc REAL(num) :: v0, v1 CONTAINS SUBROUTINE open_bcs REAL(num) :: bperp ! update ghost cells based on Riemann problem with farfield ! only expected to work perfectly for prblems with straight B field ! through boundaries which do not drastically change shape during the ! simulation. ! right boundary IF (xbc_right == BC_OPEN .AND. right == MPI_PROC_NULL) THEN DO iz = -1, nz+1 DO iy = -1, ny+1 ! variables carried out of domain by Riemann invariants vxbc(1) = vx(nx, iy, iz) vybc(1) = vy(nx, iy, iz) vzbc(1) = vz(nx, iy, iz) bxbc(1) = bx(nx, iy, iz) bybc(1) = by(nx, iy, iz) bzbc(1) = bz(nx, iy, iz) rbc(1) = rho(nx, iy, iz) rbc(0) = rho(nx+1, iy, iz) ebc(1) = energy(nx, iy, iz) pbc = (gamma - 1.0_num) * energy(nx, iy, iz) * rho(nx, iy, iz) ! farfield values carried into domain pfar = (gamma - 1.0_num) * energy(nx+2, iy, iz) * rho(nx+2, iy, iz) rhofar = rho(nx+2, iy, iz) uxfar = vx(nx+2, iy, iz) uyfar = vy(nx+2, iy, iz) uzfar = vz(nx+2, iy, iz) bxfar = bx(nx+2, iy, iz) byfar = by(nx+2, iy, iz) bzfar = bz(nx+2, iy, iz) ! direction of boundary (1 = right ; -1 = left) direction = 1.0_num vnorm = vx(nx, iy, iz) ! 
select correct open bc solver bperp = SQRT(byfar**2 + bzfar**2) IF (ABS(bxfar) <= 0.01_num * bperp) THEN CALL open_bcs_1 ELSE IF (bperp <= 0.01_num * ABS(bxfar)) THEN CALL open_bcs_2 ELSE CALL open_bcs_3 END IF v0 = SQRT(vx(nx+1, iy, iz)**2 & + vy(nx+1, iy, iz)**2 + vz(nx+1, iy, iz)**2) v1 = SQRT(vxbc(0)**2 + vybc(0)**2 + vzbc(0)**2) IF (ABS(v0 - v1) < ABS(dv_right(iy, iz))) THEN rho(nx+1, iy, iz) = rbc(0) energy(nx+1, iy, iz) = ebc(0) bz(nx+1, iy, iz) = bzbc(0) by(nx+1, iy, iz) = bybc(0) bx(nx+1, iy, iz) = bxbc(0) vx(nx+1, iy, iz) = vxbc(0) vy(nx+1, iy, iz) = vybc(0) vz(nx+1, iy, iz) = vzbc(0) ELSE vx(nx+1, iy, iz) = vxbc(1) vy(nx+1, iy, iz) = vybc(1) vz(nx+1, iy, iz) = vzbc(1) END IF END DO END DO END IF ! left bounday IF (xbc_left == BC_OPEN .AND. left == MPI_PROC_NULL) THEN DO iz = -1, nz+1 DO iy = -1, ny+1 vxbc(1) = vx(0, iy, iz) vybc(1) = vy(0, iy, iz) vzbc(1) = vz(0, iy, iz) bxbc(1) = bx(0, iy, iz) bybc(1) = by(1, iy, iz) bzbc(1) = bz(1, iy, iz) rbc(1) = rho(1, iy, iz) rbc(0) = rho(0, iy, iz) ebc(1) = energy(1, iy, iz) pbc = (gamma - 1.0_num) * energy(1, iy, iz) * rho(1, iy, iz) pfar = (gamma - 1.0_num) * energy(-1, iy, iz) * rho(-1, iy, iz) rhofar = rho(-1, iy, iz) uxfar = vx(-2, iy, iz) uyfar = vy(-2, iy, iz) uzfar = vz(-2, iy, iz) bxfar = bx(-2, iy, iz) byfar = by(-1, iy, iz) bzfar = bz(-1, iy, iz) direction = -1.0_num vnorm = vx(0, iy, iz) bperp = SQRT(byfar**2 + bzfar**2) IF (ABS(bxfar) <= 0.01_num * bperp) THEN CALL open_bcs_1 ELSE IF (bperp <= 0.01_num * ABS(bxfar)) THEN CALL open_bcs_2 ELSE CALL open_bcs_3 END IF v0 = SQRT(vx(-1, iy, iz)**2 + vy(-1, iy, iz)**2 + vz(-1, iy, iz)**2) v1 = SQRT(vxbc(0)**2 + vybc(0)**2 + vzbc(0)**2) IF (ABS(v0 - v1) < ABS(dv_left(iy, iz))) THEN rho(0, iy, iz) = rbc(0) energy(0, iy, iz) = ebc(0) bz(0, iy, iz) = bzbc(0) bx(-1, iy, iz) = bxbc(0) vx(-1, iy, iz) = vxbc(0) vy(-1, iy, iz) = vybc(0) vz(-1, iy, iz) = vzbc(0) by(0, iy, iz) = bybc(0) ELSE vx(-1, iy, iz) = vxbc(1) vy(-1, iy, iz) = vybc(1) vz(-1, iy, iz) = vzbc(1) END 
IF END DO END DO END IF ! top boundary IF (ybc_up == BC_OPEN .AND. up == MPI_PROC_NULL) THEN DO iz = -1, nz+1 DO ix = -1, nx+1 vxbc(1) = vy(ix, ny, iz) vybc(1) = vx(ix, ny, iz) vzbc(1) = vz(ix, ny, iz) bxbc(1) = by(ix, ny, iz) bybc(1) = bx(ix, ny, iz) bzbc(1) = bz(ix, ny, iz) rbc(1) = rho(ix, ny, iz) rbc(0) = rho(ix, ny+1, iz) ebc(1) = energy(ix, ny, iz) pbc = (gamma - 1.0_num) * energy(ix, ny, iz) * rho(ix, ny, iz) pfar = (gamma - 1.0_num) * energy(ix, ny+2, iz) * rho(ix, ny+2, iz) rhofar = rho(ix, ny+2, iz) uxfar = vy(ix, ny+2, iz) uyfar = vx(ix, ny+2, iz) uzfar = vz(ix, ny+2, iz) bxfar = by(ix, ny+2, iz) byfar = bx(ix, ny+2, iz) bzfar = bz(ix, ny+2, iz) direction = 1.0_num vnorm = vy(ix, ny, iz) bperp = SQRT(byfar**2 + bzfar**2) IF (ABS(bxfar) <= 0.01_num * bperp) THEN CALL open_bcs_1 ELSE IF (bperp <= 0.01_num * ABS(bxfar)) THEN CALL open_bcs_2 ELSE CALL open_bcs_3 END IF v0 = SQRT(vx(ix, ny+1, iz)**2 & + vy(ix, ny+1, iz)**2 + vz(ix, ny+1, iz)**2) v1 = SQRT(vxbc(0)**2 + vybc(0)**2 + vzbc(0)**2) IF (ABS(v0 - v1) < ABS(dv_up(ix, iz))) THEN rho(ix, ny+1, iz) = rbc(0) energy(ix, ny+1, iz) = ebc(0) bz(ix, ny+1, iz) = bzbc(0) by(ix, ny+1, iz) = bxbc(0) vx(ix, ny+1, iz) = vybc(0) vy(ix, ny+1, iz) = vxbc(0) vz(ix, ny+1, iz) = vzbc(0) bx(ix, ny+1, iz) = bybc(0) ELSE vx(ix, ny+1, iz) = vybc(1) vy(ix, ny+1, iz) = vxbc(1) vz(ix, ny+1, iz) = vzbc(1) END IF END DO END DO END IF ! bottom boundary IF (ybc_down == BC_OPEN .AND. 
down == MPI_PROC_NULL) THEN DO iz = -1, nz+1 DO ix = -1, nx+1 vxbc(1) = vy(ix, 0, iz) vybc(1) = vx(ix, 0, iz) vzbc(1) = vz(ix, 0, iz) bxbc(1) = by(ix, 0, iz) bybc(1) = bx(ix, 1, iz) bzbc(1) = bz(ix, 1, iz) rbc(1) = rho(ix, 1, iz) rbc(0) = rho(ix, 0, iz) ebc(1) = energy(ix, 1, iz) pbc = (gamma - 1.0_num) * energy(ix, 1, iz) * rho(ix, 1, iz) pfar = (gamma - 1.0_num) * energy(ix, -1, iz) * rho(ix, -1, iz) rhofar = rho(ix, -1, iz) uxfar = vy(ix, -2, iz) uyfar = vx(ix, -2, iz) uzfar = vz(ix, -2, iz) bxfar = by(ix, -2, iz) byfar = bx(ix, -1, iz) bzfar = bz(ix, -1, iz) direction = -1.0_num vnorm = vy(ix, 0, iz) bperp = SQRT(byfar**2 + bzfar**2) IF (ABS(bxfar) <= 0.01_num * bperp) THEN CALL open_bcs_1 ELSE IF (bperp <= 0.01_num * ABS(bxfar)) THEN CALL open_bcs_2 ELSE CALL open_bcs_3 END IF v0 = SQRT(vx(ix, -1, iz)**2 + vy(ix, -1, iz)**2 + vz(ix, -1, iz)**2) v1 = SQRT(vxbc(0)**2 + vybc(0)**2 + vzbc(0)**2) IF (ABS(v0 - v1) < ABS(dv_down(ix, iz))) THEN rho(ix, 0, iz) = rbc(0) energy(ix, 0, iz) = ebc(0) bz(ix, 0, iz) = bzbc(0) by(ix, -1, iz) = bxbc(0) vx(ix, -1, iz) = vybc(0) vy(ix, -1, iz) = vxbc(0) vz(ix, -1, iz) = vzbc(0) bx(ix, 0, iz) = bybc(0) ELSE vx(ix, -1, iz) = vybc(1) vy(ix, -1, iz) = vxbc(1) vz(ix, -1, iz) = vzbc(1) END IF END DO END DO END IF ! back boundary IF (zbc_back == BC_OPEN .AND. 
back == MPI_PROC_NULL) THEN DO iy = -1, ny+1 DO ix = -1, nx+1 vxbc(1) = vz(ix, iy, nz) vybc(1) = vy(ix, iy, nz) vzbc(1) = vx(ix, iy, nz) bxbc(1) = bz(ix, iy, nz) bybc(1) = by(ix, iy, nz) bzbc(1) = bx(ix, iy, nz) rbc(1) = rho(ix, iy, nz) rbc(0) = rho(ix, iy, nz+1) ebc(1) = energy(ix, iy, nz) pbc = (gamma - 1.0_num) * energy(ix, iy, nz) * rho(ix, iy, nz) pfar = (gamma - 1.0_num) * energy(ix, iy, nz+2) * rho(ix, iy, nz+2) rhofar = rho(ix, iy, nz+2) uxfar = vz(ix, iy, nz+2) uyfar = vy(ix, iy, nz+2) uzfar = vx(ix, iy, nz+2) bxfar = bz(ix, iy, nz+2) byfar = by(ix, iy, nz+2) bzfar = bx(ix, iy, nz+2) direction = 1.0_num vnorm = vz(ix, iy, nz) bperp = SQRT(byfar**2 + bzfar**2) IF (ABS(bxfar) <= 0.01_num * bperp) THEN CALL open_bcs_1 ELSE IF (bperp <= 0.01_num * ABS(bxfar)) THEN CALL open_bcs_2 ELSE CALL open_bcs_3 END IF v0 = SQRT(vx(ix, iy, nz+1)**2 & + vy(ix, iy, nz+1)**2 + vz(ix, iy, nz+1)**2) v1 = SQRT(vxbc(0)**2 + vybc(0)**2 + vzbc(0)**2) IF (ABS(v0 - v1) < ABS(dv_back(ix, iy))) THEN rho(ix, iy, nz+1) = rbc(0) energy(ix, iy, nz+1) = ebc(0) bz(ix, iy, nz+1) = bxbc(0) by(ix, iy, nz+1) = bybc(0) vx(ix, iy, nz+1) = vzbc(0) vy(ix, iy, nz+1) = vybc(0) vz(ix, iy, nz+1) = vxbc(0) bx(ix, iy, nz+1) = bzbc(0) ELSE vx(ix, iy, nz+1) = vzbc(1) vy(ix, iy, nz+1) = vybc(1) vz(ix, iy, nz+1) = vxbc(1) END IF END DO END DO END IF ! front boundary IF (zbc_front == BC_OPEN .AND. 
front == MPI_PROC_NULL) THEN DO iy = -1, ny+1 DO ix = -1, nx+1 vxbc(1) = vz(ix, iy, 0) vybc(1) = vy(ix, iy, 0) vzbc(1) = vx(ix, iy, 0) bxbc(1) = bz(ix, iy, 0) bybc(1) = by(ix, iy, 1) bzbc(1) = bx(ix, iy, 1) rbc(1) = rho(ix, iy, 1) rbc(0) = rho(ix, iy, 0) ebc(1) = energy(ix, iy, 1) pbc = (gamma - 1.0_num) * energy(ix, iy, 1) * rho(ix, iy, 1) pfar = (gamma - 1.0_num) * energy(ix, iy, -1) * rho(ix, iy, -1) rhofar = rho(ix, iy, -1) uxfar = vz(ix, iy, -2) uyfar = vy(ix, iy, -2) uzfar = vx(ix, iy, -2) bxfar = bz(ix, iy, -2) byfar = by(ix, iy, -1) bzfar = bx(ix, iy, -1) direction = -1.0_num vnorm = vz(ix, iy, 0) bperp = SQRT(byfar**2 + bzfar**2) IF (ABS(bxfar) <= 0.01_num * bperp) THEN CALL open_bcs_1 ELSE IF (bperp <= 0.01_num * ABS(bxfar)) THEN CALL open_bcs_2 ELSE CALL open_bcs_3 END IF v0 = SQRT(vx(ix, iy, -1)**2 + vy(ix, iy, -1)**2 + vz(ix, iy, -1)**2) v1 = SQRT(vxbc(0)**2 + vybc(0)**2 + vzbc(0)**2) IF (ABS(v0 - v1) < ABS(dv_front(ix, iy))) THEN rho(ix, iy, 0) = rbc(0) energy(ix, iy, 0) = ebc(0) bz(ix, iy, -1) = bxbc(0) by(ix, iy, 0) = bybc(0) vx(ix, iy, -1) = vzbc(0) vy(ix, iy, -1) = vybc(0) vz(ix, iy, -1) = vxbc(0) bx(ix, iy, 0) = bzbc(0) ELSE vx(ix, iy, -1) = vzbc(1) vy(ix, iy, -1) = vybc(1) vz(ix, iy, -1) = vxbc(1) END IF END DO END DO END IF END SUBROUTINE open_bcs SUBROUTINE open_bcs_1 ! open bc when bx = 0 REAL(num) :: c0, ct, cf REAL(num) :: pg, rhog, cffar, c0far, ctfar REAL(num) :: pmagg, uxg REAL(num), DIMENSION(3) :: vtest, pstar, vstar, rhostar, pmagstar INTEGER :: i c0far = SQRT(gamma * pfar / rhofar) ctfar = SQRT((byfar**2 + bzfar**2) / rhofar) cffar = SQRT(c0far**2 + ctfar**2) c0 = SQRT(gamma * (gamma - 1.0_num) * ebc(1)) ct = SQRT((bybc(1)**2 + bzbc(1)**2) / rbc(1)) cf = SQRT(c0**2 + ct**2) ! 
Define the speeds of the characteristics to be checked along vtest(1) = vnorm + cf vtest(2) = vnorm - cf vtest(3) = vnorm DO i = 1, 3 IF (direction * vtest(i) >= 0.0_num) THEN pstar(i) = pbc + 0.5_num * (bybc(1)**2 + bzbc(1)**2 - bxbc(1)**2) pmagstar(i) = 0.5_num * (bybc(1)**2 + bzbc(1)**2 - bxbc(1)**2) vstar(i) = vxbc(1) rhostar(i) = rbc(1) ELSE pstar(i) = pfar + 0.5_num * (byfar**2 + bzfar**2 - bxfar**2) pmagstar(i) = 0.5_num * (byfar**2 + bzfar**2 - bxfar**2) vstar(i) = uxfar rhostar(i) = rhofar END IF END DO bxbc(0) = bxbc(1) bybc(0) = bybc(1) bzbc(0) = bzbc(1) pmagg = 0.5_num * (bybc(0)**2 + bzbc(0)**2 - bxbc(0)**2) pg = 0.5_num & * (pstar(1) + pstar(2) + rhofar * cffar * (vstar(1) - vstar(2))) rhog = ((pg - pmagg) - (pstar(3) - pmagstar(3))) / c0far**2 + rhostar(3) rbc(0) = MAX(rhog, none_zero) ebc(0) = MAX(pg - pmagg, none_zero) / (gamma - 1.0_num) / rbc(0) uxg = 0.5_num & * (vstar(1) + vstar(2) + (pstar(1) - pstar(2)) / (rhofar * cffar)) vxbc(0) = uxg vybc(0) = vybc(1) vzbc(0) = vzbc(1) END SUBROUTINE open_bcs_1 SUBROUTINE open_bcs_2 ! open bc when bperp = 0 REAL(num) :: lambdayfar, lambdazfar REAL(num) :: c0, cx REAL(num) :: pg, rhog, c0far, cxfar REAL(num) :: pmagg, uxg, uyg, uzg, lambdag REAL(num), DIMENSION(5) :: vtest, pstar, uxstar, rhostar, pmagstar REAL(num), DIMENSION(5) :: uystar, lambdaystar, lambdazstar, uzstar INTEGER :: i lambdayfar = -bxfar * byfar lambdazfar = -bxfar * bzfar c0far = SQRT(gamma * pfar / rhofar) cxfar = SQRT(bxfar**2 / rhofar) c0 = SQRT(gamma * (gamma - 1.0_num) * ebc(1)) cx = SQRT(bxbc(1)**2 / rbc(1)) ! 
Define the speeds of the characteristics to be checked along vtest(1) = vnorm + c0 vtest(2) = vnorm - c0 vtest(3) = vnorm + cx vtest(4) = vnorm - cx vtest(5) = vnorm DO i = 1, 5 IF (direction * vtest(i) >= 0.0_num) THEN pstar(i) = pbc + 0.5_num * (bybc(1)**2 + bzbc(1)**2 - bxbc(1)**2) pmagstar(i) = 0.5_num * (bybc(1)**2 + bzbc(1)**2 - bxbc(1)**2) lambdaystar(i) = -bybc(1) * bxbc(1) lambdazstar(i) = -bzbc(1) * bxbc(1) uxstar(i) = vxbc(1) uystar(i) = vybc(1) uzstar(i) = vzbc(1) rhostar(i) = rbc(1) ELSE pstar(i) = pfar + 0.5_num * (byfar**2 + bzfar**2 - bxfar**2) pmagstar(i) = 0.5_num * (byfar**2 + bzfar**2 - bxfar**2) lambdaystar(i) = lambdayfar lambdazstar(i) = lambdazfar uxstar(i) = uxfar uystar(i) = uyfar uzstar(i) = uzfar rhostar(i) = rhofar END IF END DO bxbc(0) = bxbc(1) lambdag = 0.5_num * (lambdaystar(3) + lambdaystar(4) & + rhofar * cxfar * (uystar(3) - uystar(4))) bybc(0) = -lambdag / bxbc(0) lambdag = 0.5_num * (lambdazstar(3) + lambdazstar(4) & + rhofar * cxfar * (uzstar(3) - uzstar(4))) bzbc(0) = -lambdag / bxbc(0) uyg = 0.5_num * (uystar(3) + uystar(4) & + (lambdaystar(3) - lambdaystar(4)) / (rhofar * cxfar)) vybc(0) = uyg uzg = 0.5_num * (uzstar(3) + uzstar(4) & + (lambdazstar(3) - lambdazstar(4)) / (rhofar * cxfar)) vzbc(0) = uzg pmagg = 0.5_num * (bybc(0)**2 + bzbc(0)**2 - bxbc(0)**2) pg = 0.5_num & * (pstar(1) + pstar(2) + rhofar * c0far * (uxstar(1) - uxstar(2))) rhog = ((pg - pmagg) - (pstar(5) - pmagstar(5))) / c0far**2 + rhostar(5) rbc(0) = MAX(rhog, none_zero) ebc(0) = MAX(pg - pmagg, none_zero) / (gamma - 1.0_num) / rbc(0) uxg = 0.5_num & * (uxstar(1) + uxstar(2) + (pstar(1) - pstar(2)) / (rhofar * c0far)) vxbc(0) = uxg END SUBROUTINE open_bcs_2 SUBROUTINE open_bcs_3 ! Solve for when bx and bperp are non zero. Solves in the coordinate system ! 
such that y-axis points along by_farfield REAL(num), DIMENSION(7) :: vtest INTEGER :: i REAL(num) :: a, b, c, d, e, f, g REAL(num) :: pmagg, pmagfar, phi, theta REAL(num) :: c0, cx, ct, cf, cs REAL(num) :: lambdafar, byfar2 REAL(num) :: c0far, cxfar, ctfar, cffar, csfar REAL(num) :: pg, rhog, uxg, uyg, uzg, lambdag, byg, bxg, bzg REAL(num), DIMENSION(7) :: pstar, uxstar, uystar, uzstar, rhostar REAL(num), DIMENSION(7) :: lambdastar, pmagstar, bzstar ! Setup the far field variables byfar2 = SQRT(byfar**2 + bzfar**2) phi = ATAN2(bzfar, byfar) pmagfar = 0.5_num * (byfar2**2 - bxfar**2) pfar = pfar + pmagfar lambdafar = -bxfar * byfar2 c0far = SQRT(gamma * (pfar - pmagfar) / rhofar) cxfar = SQRT(bxfar**2 / rhofar) ctfar = SQRT(byfar2**2 / rhofar) cffar = SQRT(0.5_num * ((c0far**2 + cxfar**2 + ctfar**2) & + SQRT((c0far**2 + cxfar**2 + ctfar**2)**2 & - 4.0_num * c0far**2 * cxfar**2))) csfar = SQRT(0.5_num * ((c0far**2 + cxfar**2 + ctfar**2) & - SQRT((c0far**2 + cxfar**2 + ctfar**2)**2 & - 4.0_num * c0far**2 * cxfar**2))) ! Setup the speeds c0 = SQRT(gamma * (gamma - 1.0_num) * ebc(1)) cx = SQRT(bxbc(1)**2 / rbc(1)) ct = SQRT((bybc(1)**2 + bzbc(1)**2) / rbc(1)) cf = SQRT(0.5_num * ((c0**2 + cx**2 + ct**2) & + SQRT((c0**2 + cx**2 + ct**2)**2 - 4.0_num * c0**2 * cx**2))) cs = SQRT(0.5_num * ((c0**2 + cx**2 + ct**2) & - SQRT((c0**2 + cx**2 + ct**2)**2 - 4.0_num * c0**2 * cx**2))) ! Define the speeds of the characteristics to be checked along vtest(1) = vnorm + cf vtest(2) = vnorm - cf vtest(3) = vnorm - cs vtest(4) = vnorm + cs vtest(5) = vnorm vtest(6) = vnorm + cx vtest(7) = vnorm - cx ! 
Now check which characteristics are inflowing, outflowing, non-moving DO i = 1, 7 IF (direction * vtest(i) >= 0.0_num) THEN pstar(i) = pbc + 0.5_num * (bybc(1)**2 + bzbc(1)**2 - bxbc(1)**2) pmagstar(i) = 0.5_num * (bybc(1)**2 + bzbc(1)**2 - bxbc(1)**2) rhostar(i) = rbc(1) lambdastar(i) = -bxbc(1) * (bybc(1) * byfar + bzbc(1) * bzfar) / byfar2 theta = ATAN2(bzbc(1), bybc(1)) bzstar(i) = SQRT(bybc(1)**2 + bzbc(1)**2) * SIN(theta - phi) uystar(i) = (vybc(1) * byfar + vzbc(1) * bzfar) / byfar2 uzstar(i) = SQRT(vybc(1)**2 + vzbc(1)**2) * SIN(theta - phi) uxstar(i) = vxbc(1) ELSE pstar(i) = pfar pmagstar(i) = pmagfar rhostar(i) = rhofar lambdastar(i) = lambdafar bzstar(i) = 0.0_num uystar(i) = uyfar uzstar(i) = uzfar uxstar(i) = uxfar END IF END DO ! Now setup the constants that are defined in the solution a = (cffar**2 - cxfar**2) b = (lambdafar / rhofar) c = (csfar**2 - cxfar**2) d = pstar(1) + pstar(2) + rhofar * cffar * (uxstar(1) - uxstar(2)) e = lambdastar(1) + lambdastar(2) + rhofar * cffar * (uystar(1) - uystar(2)) f = pstar(3) + pstar(4) - rhofar * csfar * (uxstar(3) - uxstar(4)) g = lambdastar(3) + lambdastar(4) - rhofar * csfar * (uystar(3) - uystar(4)) pg = 0.5_num * (a*d + b*e - c*f - b*g) / (a - c) lambdag = 0.5_num * (c * (a*d + b*e) - a * (c*f + b*g)) / (b * (c - a)) d = (pstar(1) - pstar(2)) / (rhofar * cffar) + (uxstar(1) + uxstar(2)) e = (lambdastar(1) - lambdastar(2)) & / (rhofar * cffar) + (uystar(1) + uystar(2)) f = (pstar(4) - pstar(3)) / (rhofar * csfar) + (uxstar(3) + uxstar(4)) g = (lambdastar(4) - lambdastar(3)) & / (rhofar * csfar) + (uystar(3) + uystar(4)) uxg = 0.5_num * (a*d + b*e - c*f - b*g) / (a - c) uyg = 0.5_num * (c * (a*d + b*e) - a * (c*f + b*g)) / (b * (c - a)) a = cxfar * rhofar / bxfar bzg = 0.5_num * (bzstar(6) + bzstar(7) + a * (uzstar(7) - uzstar(6))) uzg = 0.5_num * (uzstar(6) + uzstar(7) + (bzstar(7) - bzstar(6)) / a) bxg = bxbc(1) byg = -lambdag / bxg pmagg = 0.5_num * (byg**2 + bzg**2 - bxg**2) rhog = ((pg - pmagg) - 
(pstar(5) - pmagstar(5))) / c0**2 + rhostar(5) rhog = MAX(rhog, none_zero) rbc(0) = rhog ebc(0) = MAX(pg - pmagg, none_zero) / ((gamma - 1.0_num) * rhog) ! rotate back to grid coordinate system bxbc(0) = bxg bybc(0) = byg * COS(phi) - bzg * SIN(phi) bzbc(0) = byg * SIN(phi) + bzg * COS(phi) vxbc(0) = uxg vybc(0) = uyg * COS(phi) - uzg * SIN(phi) vzbc(0) = uyg * SIN(phi) + uzg * COS(phi) END SUBROUTINE open_bcs_3 END MODULE openboundary Lare3d/src/core/README000644 000765 000024 00000003411 11406627300 014661 0ustar00Tonystaff000000 000000 LARE3D core directory This directory contains the core of the solver and the physics packages for LARE2D. Many of the files in this directory are core parts of the code which should not be altered by end users, but there are some sections which can be changed to change the range of simulations which can be performed by LARE. --------------------------------------------------------------- shared_data.F90 shared_data.F90 is the module which contains the global arrays used by LARE (compare with F77 common blocks). Any new global variables which are needed should be defined here. -------------------------------------------------------------- mpi_routines.f90 mpi_routines.f90 is the module which sets up the MPI parallel part of the code, and also allocates the main code arrays. Any new array variable added to the code should be allocated/deallocated in this module, using the existing arrays as a guide. -------------------------------------------------------------- lagran.f90 Most of lagran.f90 should not be altered by end users, but the single subroutine "eta_calc" is used to calculate the resistivity in the code. In the routine, simply set the array "eta" to the required value at every point in space and the code will then apply that value of resistivity in the simulation. 
-------------------------------------------------------------- eos.f90 It is possible to add new equations of state to LARE by modifying the subroutines in this module to calculate 1) Pressure given specific internal energy and density 2) Specific internal energy given temperature and density 3) Temperature given specific internal energy and density 4) Sound speed given specific internal energy and density The rest of the code has been written to work with whatever equation of state is given here.Lare3d/src/core/remap.f90000644 000765 000024 00000004325 11406627300 015432 0ustar00Tonystaff000000 000000 MODULE remap USE shared_data USE xremap USE yremap USE zremap IMPLICIT NONE PRIVATE PUBLIC :: eulerian_remap CONTAINS SUBROUTINE eulerian_remap(i) ! Strang splitting INTEGER, INTENT(IN) :: i INTEGER :: case_test delta_ke = 0.0_num xpass = 1 ypass = 1 zpass = 1 DO iz = -1, nz+2 DO iy = -1, ny+2 DO ix = -1, nx+2 bx(ix, iy, iz) = bx(ix, iy, iz) * (dyb(iy) * dzb(iz)) by(ix, iy, iz) = by(ix, iy, iz) * (dxb(ix) * dzb(iz)) bz(ix, iy, iz) = bz(ix, iy, iz) * (dxb(ix) * dyb(iy)) END DO END DO END DO DO iz = -1, nz+2 DO iy = -1, ny+2 bx(-2, iy, iz) = bx(-2, iy, iz) * (dyb(iy) * dzb(iz)) END DO END DO DO iz = -1, nz+2 DO ix = -1, nx+2 by(ix, -2, iz) = by(ix, -2, iz) * (dxb(ix) * dzb(iz)) END DO END DO DO iy = -1, ny+2 DO ix = -1, nx+2 bz(ix, iy, -2) = bz(ix, iy, -2) * (dxb(ix) * dyb(iy)) END DO END DO case_test = MODULO(i, 6) SELECT CASE(case_test) ! 
Strang ordering CASE (0) CALL remap_x CALL remap_y CALL remap_z CASE (1) CALL remap_y CALL remap_z CALL remap_x CASE (2) CALL remap_z CALL remap_x CALL remap_y CASE (3) CALL remap_x CALL remap_z CALL remap_y CASE (4) CALL remap_z CALL remap_y CALL remap_x CASE (5) CALL remap_y CALL remap_x CALL remap_z END SELECT DO iz = -1, nz+2 DO iy = -1, ny+2 DO ix = -1, nx+2 bx(ix, iy, iz) = bx(ix, iy, iz) / (dyb(iy) * dzb(iz)) by(ix, iy, iz) = by(ix, iy, iz) / (dxb(ix) * dzb(iz)) bz(ix, iy, iz) = bz(ix, iy, iz) / (dxb(ix) * dyb(iy)) END DO END DO END DO DO iz = -1, nz+2 DO iy = -1, ny+2 bx(-2, iy, iz) = bx(-2, iy, iz) / (dyb(iy) * dzb(iz)) END DO END DO DO iz = -1, nz+2 DO ix = -1, nx+2 by(ix, -2, iz) = by(ix, -2, iz) / (dxb(ix) * dzb(iz)) END DO END DO DO iy = -1, ny+2 DO ix = -1, nx+2 bz(ix, iy, -2) = bz(ix, iy, -2) / (dxb(ix) * dyb(iy)) END DO END DO END SUBROUTINE eulerian_remap END MODULE remap Lare3d/src/core/setup.F90000644 000765 000024 00000040114 11406627300 015422 0ustar00Tonystaff000000 000000 MODULE setup USE shared_data USE normalise USE iocommon USE iocontrol USE input USE input_cartesian IMPLICIT NONE PRIVATE PUBLIC :: before_control, after_control PUBLIC :: grid PUBLIC :: open_files, close_files, restart_data PUBLIC :: normalise_code, normalise_neutral REAL(num), DIMENSION(:), ALLOCATABLE :: dxnew, dynew, dznew CONTAINS SUBROUTINE before_control ! Setup basic variables which have to have default values nprocx = 0 nprocy = 0 nprocz = 0 time = 0.0_num gamma = 5.0_num / 3.0_num IF (num .EQ. 4) mpireal = MPI_REAL END SUBROUTINE before_control SUBROUTINE after_control ! Setup arrays and other variables which can only be set after ! user input IF (IAND(initial, IC_RESTART) .EQ. 0) restart_snapshot = 0 p_visc = 0.0_num eta = 0.0_num grav = 0.0_num lambda_i = 0.0_num rho = 0.0_num energy = 0.0_num bx = 0.0_num by = 0.0_num bz = 0.0_num vx = 0.0_num vy = 0.0_num vz = 0.0_num END SUBROUTINE after_control SUBROUTINE normalise_code ! 
Normalise physical parameters (eta, grav, visc3 etc.) CALL normalise_constants ! setup.f90 ! Normalise the grid CALL normalise_grid ! setup.f90 ! Normalise the actual initial conditions CALL normalise_eqm ! setup.f90 ! Normalise the constants needed for the ! partially ionised plasma routines CALL normalise_neutral ! setup.f90 END SUBROUTINE normalise_code SUBROUTINE normalise_grid ! Normalise the grid, including control volumes xb = xb / L0 xc = xc / L0 xb_global = xb_global / L0 dxc = dxc / L0 dxb = dxb / L0 yb = yb / L0 yc = yc / L0 yb_global = yb_global / L0 dyc = dyc / L0 dyb = dyb / L0 zb = zb / L0 zc = zc / L0 zb_global = zb_global / L0 dzc = dzc / L0 dzb = dzb / L0 cv = cv / L0**3 END SUBROUTINE normalise_grid SUBROUTINE normalise_eqm ! Normalise the initial conditions rho = rho / rho0 energy = energy / energy0 vx = vx / vel0 vy = vy / vel0 vz = vz / vel0 bx = bx / B0 by = by / B0 bz = bz / B0 END SUBROUTINE normalise_eqm SUBROUTINE normalise_constants ! Normalise gravity etc. grav = grav / grav0 visc3 = visc3 / visc0 eta0 = eta0 / res0 eta_background = eta_background / res0 ! the SI value for the constant in the conductivity ! assuming ln(Lambda) = 18.4 kappa_0 = 1.0e-11_num kappa_0 = kappa_0 / kappa0 time = time / T0 t_end = t_end / T0 dt_snapshots = dt_snapshots / T0 lambda_i = lambda_i / L0 END SUBROUTINE normalise_constants SUBROUTINE normalise_neutral ! Normalise constants used in the calculation of properties ! Of partially ionised plasmas ! Normalised mass REAL(num) :: eta_bar_0 pressure0 = B0**2 / mu0 ! Pressure energy0 = B0**2 / (mu0 * rho0) res0 = 1.0_num temp0 = mbar * pressure0 / (kb * rho0) ! Temperature in K ! Normalise tbar t_bar = t_bar / temp0 ! Redefine rbar to incluse normalisation from rho and b/f r_bar = r_bar * rho0 / temp0**(3.0_num / 2.0_num) ! Normalise eta_bar eta_bar_0 = rho0**2 * SQRT(temp0) * res0 / B0**2 eta_bar = eta_bar / eta_bar_0 ! 
Finally normalise ion_mass and ionise_pot which are needed in the code ionise_pot = ionise_pot / (energy0 * mbar) tr = tr / temp0 END SUBROUTINE normalise_neutral SUBROUTINE grid ! stretched and staggered grid REAL(num) :: dx, dy, dz, xcstar, ycstar, zcstar INTEGER :: ix, iy ALLOCATE(xb_global(-2:nx_global+2), dxnew(-2:nx_global+2)) ALLOCATE(yb_global(-2:ny_global+2), dynew(-2:ny_global+2)) ALLOCATE(zb_global(-2:nz_global+2), dznew(-2:nz_global+2)) ! initially assume uniform grid dx = 1.0_num / REAL(nx_global, num) dy = 1.0_num / REAL(ny_global, num) dz = 1.0_num / REAL(nz_global, num) length_x = x_end - x_start length_y = y_end - y_start length_z = z_end - z_start xb_global(0) = 0.0_num ! grid cell boundary for x coordinates DO ix = -2, nx_global+2 xb_global(ix) = xb_global(0) + REAL(ix, num) * dx END DO xb_global = xb_global * (x_end - x_start) + x_start IF (x_stretch) CALL stretch_x ! stretch grid ? ! define position of ghost cells using sizes of adjacent cells xb_global(nx_global+1) = xb_global(nx_global) & + (xb_global(nx_global) - xb_global(nx_global-1)) ! needed for ghost cell xb_global(nx_global+2) = xb_global(nx_global+1) & + (xb_global(nx_global+1) - xb_global(nx_global)) xb_global(-1) = xb_global( 0) - (xb_global(1) - xb_global( 0)) xb_global(-2) = xb_global(-1) - (xb_global(0) - xb_global(-1)) xb = xb_global(coordinates(3)*nx-2:coordinates(3)*nx+nx+2) DO ix = -1, nx+2 ixm = ix - 1 xc(ix) = 0.5_num * (xb(ixm) + xb(ix)) ! cell centre END DO DO ix = -1, nx+1 ixp = ix + 1 dxc(ix) = xc(ixp) - xc(ix) ! distance between centres END DO IF (coordinates(3) == nprocx - 1) THEN dxc(nx+2) = dxc(nx+1) ELSE xcstar = 0.5_num * (xb(nx+2) + xb_global(coordinates(3)*nx+nx+3)) dxc(nx+2) = xcstar - xc(nx+2) END IF DO ix = -1, nx+2 ixm = ix - 1 dxb(ix) = xb(ix) - xb(ixm) ! cell width END DO yb_global(0) = 0.0_num ! 
grid cell boundary for y coordinates DO iy = -2, ny_global+2 yb_global(iy) = yb_global(0) + REAL(iy, num) * dy END DO yb_global = yb_global * (y_end - y_start) + y_start IF (y_stretch) CALL stretch_y ! stretch grid ? ! define position of ghost cells using sizes of adjacent cells yb_global(ny_global+1) = yb_global(ny_global) & + (yb_global(ny_global) - yb_global(ny_global-1)) ! needed for ghost cell yb_global(ny_global+2) = yb_global(ny_global+1) & + (yb_global(ny_global+1) - yb_global(ny_global)) yb_global(-1) = yb_global( 0) - (yb_global(1) - yb_global( 0)) yb_global(-2) = yb_global(-1) - (yb_global(0) - yb_global(-1)) yb = yb_global(coordinates(2)*ny-2:coordinates(2)*ny+ny+2) DO iy = -1, ny+2 iym = iy - 1 yc(iy) = 0.5_num * (yb(iym) + yb(iy)) ! cell centre END DO DO iy = -1, ny+1 iyp = iy + 1 dyc(iy) = yc(iyp) - yc(iy) ! distance between centres END DO IF (coordinates(2) == nprocy - 1) THEN dyc(ny+2) = dyc(ny+1) ELSE ycstar = 0.5_num * (yb(ny+2) + yb_global(coordinates(2)*ny+ny+3)) dyc(ny+2) = ycstar - yc(ny+2) END IF DO iy = -1, ny+2 iym = iy - 1 dyb(iy) = yb(iy) - yb(iym) ! cell width END DO zb_global(0) = 0.0_num ! grid cell boundary for z coordinates DO iz = -2, nz_global+2 zb_global(iz) = zb_global(0) + REAL(iz, num) * dz END DO zb_global = zb_global * (z_end - z_start) + z_start IF (z_stretch) CALL stretch_z ! stretch grid ? ! define position of ghost cells using sizes of adjacent cells zb_global(nz_global+1) = zb_global(nz_global) & + (zb_global(nz_global) - zb_global(nz_global-1)) ! needed for ghost cell zb_global(nz_global+2) = zb_global(nz_global+1) & + (zb_global(nz_global+1) - zb_global(nz_global)) zb_global(-1) = zb_global( 0) - (zb_global(1) - zb_global( 0)) zb_global(-2) = zb_global(-1) - (zb_global(0) - zb_global(-1)) zb = zb_global(coordinates(1)*nz-2:coordinates(1)*nz+nz+2) DO iz = -1, nz+2 izm = iz - 1 zc(iz) = 0.5_num * (zb(izm) + zb(iz)) ! cell centre END DO DO iz = -1, nz+1 izp = iz + 1 dzc(iz) = zc(izp) - zc(iz) ! 
distance between centres END DO IF (coordinates(1) == nprocz - 1) THEN dzc(nz+2) = dzc(nz+1) ELSE zcstar = 0.5_num * (zb(nz+2) + zb_global(coordinates(1)*nz+nz+3)) dzc(nz+2) = zcstar - zc(nz+2) END IF DO iz = -1, nz+2 izm = iz - 1 dzb(iz) = zb(iz) - zb(izm) ! cell width END DO DO ix = -1, nx+2 DO iy = -1, ny+2 DO iz = -1, nz+2 cv(ix, iy, iz) = dxb(ix) * dyb(iy) * dzb(iz) ! define the cell area END DO END DO END DO DEALLOCATE(dxnew, dynew, dznew) END SUBROUTINE grid ! Subroutine stretches the grid in the x direction SUBROUTINE stretch_x ! replace with any stretching algorithm as needed REAL(num) :: width, dx, L, f, lx_new ! new total length lx_new = 200.0_num ! centre of tanh stretching in unstretched coordinates L = length_x / 1.5_num ! width of tanh stretching in unstretched coordinates width = length_x / 10.0_num f = (lx_new - length_x) / (length_x - L) / 2.0_num dx = length_x / REAL(nx_global, num) dxnew = dx + f * (1.0_num + TANH((ABS(xb_global) - L) / width)) * dx !!$ DO ix = nx_global/2+1, nx_global+2 !!$ xb_global(ix) = xb_global(ix-1) + dxnew(ix) !!$ END DO !!$ DO ix = nx_global/2-1, -2, -1 !!$ xb_global(ix) = xb_global(ix+1) - dxnew(ix) !!$ END DO DO ix = 1, nx_global+2 xb_global(ix) = xb_global(ix-1) + dxnew(ix) END DO length_x = lx_new END SUBROUTINE stretch_x ! Subroutine stretches the domain in the y direction SUBROUTINE stretch_y ! stretch domain upwards only REAL(num) :: width, dy, L, f, ly_new ! new tolal length ly_new = 100.0_num ! centre of tanh stretching in unstretched coordinates L = length_y / 1.5_num ! 
width of tanh stretching in unstretched coordinates width = length_y / 10.0_num f = (ly_new - length_y) / (length_y - L) / 2.0_num dy = length_y / REAL(ny_global, num) dynew = dy + f * (1.0_num + TANH((ABS(yb_global) - L) / width)) * dy !!$ DO iy = ny_global/2+1, ny_global+2 !!$ yb_global(iy) = yb_global(iy-1) + dynew(iy) !!$ END DO !!$ DO iy = ny_global/2-1, -2, -1 !!$ yb_global(iy) = yb_global(iy+1) - dynew(iy) !!$ END DO DO iy = 1, ny_global+2 yb_global(iy) = yb_global(iy-1) + dynew(iy) END DO length_y = ly_new END SUBROUTINE stretch_y ! Subroutine stretches the domain in the z direction SUBROUTINE stretch_z REAL(num) :: width, dz, L, f, lz_new ! new tolal length lz_new = 33.0_num ! centre of tanh stretching in unstretched coordinates L = 2.0_num * length_z / 3.0_num ! width of tanh stretching in unstretched coordinates width = length_z / 10.0_num f = (lz_new - length_z) / (length_z - L) / 2.0_num dz = length_z / REAL(nz_global, num) dznew = dz + f * (1.0_num + TANH((ABS(zb_global) - L) / width)) * dz DO iz = 1, nz_global+2 zb_global(iz) = zb_global(iz-1) + dznew(iz) END DO END SUBROUTINE stretch_z ! Open the output diagnostic files SUBROUTINE open_files CHARACTER(LEN = 11+data_dir_max_length) :: file2 CHARACTER(LEN = 7+data_dir_max_length) :: file3 INTEGER :: ios IF (rank == 0) THEN WRITE(file2, '(a, "/lare3d.dat")') TRIM(data_dir) OPEN(unit = 20, STATUS = 'REPLACE', FILE = file2, iostat = ios) IF (ios .NE. 0) THEN PRINT *, "Unable to open file lare3d.dat for writing. This is ", & "most commonly caused by the output directory not existing" PRINT *, " " PRINT *, " " CALL MPI_ABORT(comm, errcode) END IF WRITE(file3, '(a, "/en.dat")') TRIM(data_dir) OPEN(unit = 30, STATUS = 'REPLACE', FILE = file3, & FORM = "binary", iostat = ios) IF (ios .NE. 0) THEN PRINT *, "Unable to open file en.dat for writing. 
This is ", & "most commonly caused by the output directory not existing" PRINT *, " " PRINT *, " " CALL MPI_ABORT(comm, errcode) END IF END IF END SUBROUTINE open_files ! Close the output diagnostic files SUBROUTINE close_files IF (rank == 0) THEN CLOSE(unit = 20) CLOSE(unit = 30) END IF END SUBROUTINE close_files ! Subroutine to perform string comparisons FUNCTION str_cmp(str_in, str_test) CHARACTER(*), INTENT(IN) :: str_in, str_test CHARACTER(30) :: str_trim LOGICAL :: str_cmp str_trim = TRIM(ADJUSTL(str_in)) IF (LEN(str_test) .GT. LEN(str_in)) THEN str_cmp = .FALSE. RETURN END IF IF (str_trim(LEN(str_test)+1:LEN(str_test)+1) .NE. " ") THEN str_cmp = .FALSE. RETURN END IF str_cmp = str_trim(1:LEN(str_test)) == str_test END FUNCTION str_cmp ! Restart from previous output dumps SUBROUTINE restart_data CHARACTER(LEN = 20+data_dir_max_length) :: filename CHARACTER(LEN = 20) :: name, class, mesh_name, mesh_class INTEGER :: nblocks, type, nd, sof, snap INTEGER, DIMENSION(3) :: dims REAL(dbl) :: time_d REAL(num), DIMENSION(3) :: extent REAL(num), DIMENSION(3) :: stagger REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: data ! Create the filename for the last snapshot #ifdef MHDCLUSTER WRITE(filename, '("nfs:", a, "/", i4.4, ".cfd")') & TRIM(data_dir), restart_snapshot #else WRITE(filename, '(a, "/", i4.4, ".cfd")') & TRIM(data_dir), restart_snapshot #endif output_file = restart_snapshot ALLOCATE(data(0:nx, 0:ny, 0:nz)) CALL cfd_open(filename, rank, comm, MPI_MODE_RDONLY) ! Open the file nblocks = cfd_get_nblocks() DO ix = 1, nblocks CALL cfd_get_next_block_info_all(name, class, type) IF (rank == 0) PRINT *, ix, name, class, type IF (type == TYPE_SNAPSHOT) THEN CALL cfd_get_snapshot(time_d, snap) time = time_d END IF IF (type == TYPE_MESH) THEN ! Strangely, LARE doesn't actually read in the grid from a file ! This can be fixed, but for the moment, just go with the flow and ! 
Replicate the old behaviour CALL cfd_skip_block() ELSE IF (type == TYPE_MESH_VARIABLE) THEN CALL cfd_get_common_meshtype_metadata_all(type, nd, sof) IF (nd /= DIMENSION_3D) THEN IF (rank == 0) PRINT *, "Non 3D Dataset found in input file, ", & "ignoring and continuting." CALL cfd_skip_block() CYCLE END IF IF (type /= VAR_CARTESIAN) THEN IF (rank == 0) PRINT *, "Non - Cartesian variable block found ", & "in file, ignoring and continuing" CALL cfd_skip_block() CYCLE END IF ! We now have a valid variable, let's load it up ! First error trapping CALL cfd_get_nd_cartesian_variable_metadata_all(nd, dims, extent, & stagger, mesh_name, mesh_class) IF (dims(1) /= nx_global+1 & .OR. dims(2) /= ny_global+1 .OR. dims(3) /= nz_global+1) THEN IF (rank == 0) PRINT *, "Size of grid represented by one more ", & "variables invalid. Continuing" CALL cfd_skip_block CYCLE END IF IF (sof /= num) THEN IF (rank == 0) PRINT *, "Precision of data does not match ", & "precision of code. Continuing." CALL cfd_skip_block END IF ! We're not interested in the other parameters, so if we're here, ! load up the data CALL cfd_get_3d_cartesian_variable_parallel(data, subtype) ! Now have the data, just copy it to correct place IF (str_cmp(name(1:3), "Rho")) THEN rho(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:6), "Energy")) THEN energy(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:2), "Vx")) THEN vx(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:2), "Vy")) THEN vy(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:2), "Vz")) THEN vz(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:2), "Bx")) THEN bx(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:2), "By")) THEN by(0:nx, 0:ny, 0:nz) = data END IF IF (str_cmp(name(1:2), "Bz")) THEN bz(0:nx, 0:ny, 0:nz) = data END IF ! Should be at end of block, but force the point anyway CALL cfd_skip_block() ELSE ! 
Unknown block, just skip it CALL cfd_skip_block() END IF END DO DEALLOCATE(data) CALL cfd_close() CALL MPI_BARRIER(comm, errcode) END SUBROUTINE restart_data END MODULE setup Lare3d/src/core/shared_data.F90000644 000765 000024 00000011164 11430520723 016522 0ustar00Tonystaff000000 000000 !**** ! All global variables defined here (cf F77 COMMON block). ! All the names in here are public provided the MODULE is USE'd !**** MODULE constants IMPLICIT NONE #ifdef Q_SINGLE INTEGER, PARAMETER :: num = KIND(1.0) #else INTEGER, PARAMETER :: num = KIND(1.D0) #endif INTEGER, PARAMETER :: dbl = KIND(1.D0) REAL(num), PARAMETER :: pi = 3.14159265358979323_num REAL(num), PARAMETER :: none_zero = TINY(1.0_num) REAL(num), PARAMETER :: largest_number = HUGE(1.0_num) INTEGER, PARAMETER :: BC_PERIODIC = 1, BC_OTHER = 2 INTEGER, PARAMETER :: BC_OPEN = 3 INTEGER, PARAMETER :: version = 2, revision = 2 ! IC codes ! This is a bitmask, remember that INTEGER, PARAMETER :: IC_NEW = 1, IC_RESTART = 2 ! Equation of state codes INTEGER, PARAMETER :: EOS_IDEAL = 1, EOS_ION = 2, EOS_PI = 3 END MODULE constants MODULE shared_data USE constants IMPLICIT NONE INCLUDE 'mpif.h' #ifdef Q_SINGLE INTEGER :: mpireal = MPI_REAL #else INTEGER :: mpireal = MPI_DOUBLE_PRECISION #endif INTEGER :: nx_global, ny_global, nz_global ! 
NB: as there are now 2 ghost celss so indexing will fail if (nx, ny, nz)<2 INTEGER :: nx, ny, nz INTEGER :: nsteps REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho, energy REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: bx, vx, vx1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: by, vy, vy1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: bz, vz, vz1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: bx1, by1, bz1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: jx_r, jy_r, jz_r REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: delta_ke, p_visc REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: eta, lambda_i REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: cv, cv1, bzone REAL(num), DIMENSION(:), ALLOCATABLE :: xc, xb REAL(num), DIMENSION(:), ALLOCATABLE :: dxb, dxc REAL(num), DIMENSION(:), ALLOCATABLE :: xb_global, yb_global, zb_global REAL(num), DIMENSION(:), ALLOCATABLE :: yc, yb REAL(num), DIMENSION(:), ALLOCATABLE :: dyb, dyc, grav REAL(num), DIMENSION(:), ALLOCATABLE :: zc, zb REAL(num), DIMENSION(:), ALLOCATABLE :: dzb, dzc REAL(num), DIMENSION(:, :), ALLOCATABLE :: dv_left, dv_right REAL(num), DIMENSION(:, :), ALLOCATABLE :: dv_up, dv_down REAL(num), DIMENSION(:, :), ALLOCATABLE :: dv_back, dv_front INTEGER, PARAMETER :: data_dir_max_length = 64 CHARACTER(LEN = data_dir_max_length) :: data_dir REAL(num) :: w1, w2, w3, w4, w5, w6, w7, w8 REAL(num) :: dt, dt2, dtr, dth, t_end, time REAL(num) :: dt_multiplier = 0.8_num REAL(num) :: length_x, length_y, length_z, visc1, visc2, visc3 REAL(num) :: x_start, x_end, y_start, y_end, z_start, z_end REAL(num) :: gamma, eta0, j_max, dt_snapshots, lambda0, eta_background REAL(num) :: total_visc_heating = 0.0_num, total_ohmic_heating = 0.0_num REAL(num) :: vc INTEGER :: xbc_right, ybc_up, xbc_left, ybc_down, zbc_front, zbc_back INTEGER :: ix, iy, iz, ixp, iyp, izp, ixm, iym, izm, xpass, ypass, zpass INTEGER :: restart_snapshot INTEGER :: peak_substeps = 0 LOGICAL :: x_stretch, y_stretch, z_stretch, rke LOGICAL :: resistive_mhd, any_open, hall_mhd 
LOGICAL :: restart ! Heat conduction LOGICAL :: conduction LOGICAL :: heat_flux_limiter REAL(num) :: kappa_0, flux_limiter, temperature_100mk ! RTV radiation LOGICAL :: radiation ! Code normalisation LOGICAL :: SI ! Driving REAL(num) :: drv_amp = 0.0_num, omega = 2.0_num * pi / 1.0_num ! Equation of state INTEGER :: eos_number = EOS_IDEAL ! Damping boundary variables LOGICAL :: damping REAL(num) :: damp_rate ! Partially ionised plasma REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: eta_perp, xi_n, eta_perp0 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: parallel_current, perp_current LOGICAL :: include_neutrals, cowling_resistivity REAL(num) :: f_bar, t_bar, tr, ionise_pot, r_bar REAL(num) :: eta_bar ! MPI data INTEGER :: rank, left, right, up, down, front, back, coordinates(3) INTEGER :: errcode, comm, tag, nproc, nprocx, nprocy, nprocz INTEGER :: status(MPI_STATUS_SIZE) ! file handling INTEGER :: subtype, obstype INTEGER(KIND = MPI_OFFSET_KIND) :: initialdisp INTEGER :: initial INTEGER :: n_zeros = 4 INTEGER :: output_file = 0 ! Number of variables to dump LOGICAL, DIMENSION(19) :: dump_mask END MODULE shared_data ! The pre-processor removes the following line so it compiles without error ! unless the pre-processor hasn't been run over it #ifdef PRECHECK This line deliberately breaks the compile IF the preprocessor has not worked. #endif Lare3d/src/core/welcome.f90000644 000765 000024 00000004700 11430520723 015754 0ustar00Tonystaff000000 000000 MODULE welcome USE shared_data IMPLICIT NONE PRIVATE PUBLIC :: welcome_message CONTAINS SUBROUTINE welcome_message INTEGER, PARAMETER :: LOGOX = 34, LOGOY = 11 INTEGER, DIMENSION(LOGOX, LOGOY) :: LOGO CHARACTER(LOGOX*2+1) :: LOGOSTRING CHARACTER, DIMENSION(5) :: LOGOELS CHARACTER, DIMENSION(4) :: clrstr = (/ ' ', '[', '2', 'J' /) INTEGER :: ix, iy IF (rank .NE. 
0) RETURN clrstr(1) = CHAR(27) WRITE(*, '(1x, 4a1)') clrstr LOGOELS = (/ ' ', '@', " ", " ", " " /) PRINT *, "" PRINT *, "" LOGO(:, 1) = (/ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, & 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 /) LOGO(:, 2) = (/ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 /) LOGO(:, 3) = (/ 3, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, & 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 4 /) LOGO(:, 4) = (/ 3, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, & 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 4 /) LOGO(:, 5) = (/ 3, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, & 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 4 /) LOGO(:, 6) = (/ 3, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, & 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 4 /) LOGO(:, 7) = (/ 3, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, & 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 4 /) LOGO(:, 8) = (/ 3, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, & 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 4 /) LOGO(:, 9) = (/ 3, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, & 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 4 /) LOGO(:, 10) = (/ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 /) LOGO(:, 11) = (/ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, & 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 /) LOGOSTRING = " " DO iy = 1, LOGOY*2+1 DO ix = 1, LOGOX LOGOSTRING(ix*2-1:ix*2-1) = LOGOELS(LOGO(ix, MAX(iy/2, 1)) + 1) LOGOSTRING(ix*2:ix*2) = LOGOELS(LOGO(ix, MAX(iy/2, 1)) + 1) END DO WRITE(*, *), LOGOSTRING END DO WRITE(*, *) "" WRITE(*, '("Welcome to Lare3D Version ", I1, ".", I1, "BETA")'), & version, revision WRITE(*, *) "" END SUBROUTINE welcome_message END MODULE welcome Lare3d/src/core/xremap.f90000644 000765 000024 00000067314 11430520723 015627 0ustar00Tonystaff000000 000000 
!------------------------------------------------------------------------- ! mass coordinate based Van Leer limited remap. ! See Bram van Leer, JCP, vol 135, p229, (1997) ! Now rewritten to allow compiler vectorizing of loops ! See notes in code !------------------------------------------------------------------------- MODULE xremap USE shared_data; USE boundary IMPLICIT NONE PRIVATE PUBLIC :: remap_x REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho1, dm, cv2, flux, dxb1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho_v, rho_v1 CONTAINS SUBROUTINE remap_x ! remap onto original Eulerian grid REAL(num) :: vxb, vxbm, vyb, vybm, vzb, vzbm, dv ALLOCATE (rho1(-1:nx+2, -1:ny+2, -1:nz+2), dm(-1:nx+2, -1:ny+2, -1:nz+2), & cv2(-1:nx+2, -1:ny+2, -1:nz+2), flux(-2:nx+2, -1:ny+2, -1:nz+2), & dxb1(-1:nx+2, -1:ny+2, -1:nz+2), rho_v(-1:nx+2, -1:ny+2, -1:nz+2), & rho_v1(-1:nx+2, -1:ny+2, -1:nz+2)) dm = 0.0_num rho1 = rho ! store initial density in rho1 DO iz = -1, nz+2 izm = iz - 1 DO iy = -1, ny+2 iym = iy - 1 DO ix = -1, nx+2 ixm = ix - 1 ! vx at Sx(i, j, k) vxb = (vx1(ix, iy, iz) + vx1(ix, iym, iz) & + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num ! vx at Sx(i-1, j, k) vxbm = (vx1(ixm, iy, iz) + vx1(ixm, iym, iz) & + vx1(ixm, iy, izm) + vx1(ixm, iym, izm)) / 4.0_num ! vy at Sy(i, j, k) vyb = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num ! vy at Sy(i, j-1, k) vybm = (vy1(ix, iym, iz) + vy1(ixm, iym, iz) & + vy1(ix, iym, izm) + vy1(ixm, iym, izm)) / 4.0_num ! vz at Sz(i, j, k) vzb = (vz1(ix, iy, iz) + vz1(ixm, iy, iz) & + vz1(ix, iym, iz) + vz1(ixm, iym, iz)) / 4.0_num ! vz at Sz(i, j, k-1) vzbm = (vz1(ix, iy, izm) + vz1(ixm, iy, izm) & + vz1(ix, iym, izm) + vz1(ixm, iym, izm)) / 4.0_num dv = (REAL(ypass, num) * (vyb - vybm) / dyb(iy) & + REAL(zpass, num) * (vzb - vzbm) / dzb(iz) & + (vxb - vxbm) / dxb(ix)) * dt ! 
control volume before remap cv1(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv) dv = (REAL(ypass, num) * (vyb - vybm) / dyb(iy) & + REAL(zpass, num) * (vzb - vzbm) / dzb(iz)) * dt ! control volume after remap cv2(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv) ! dxb before remap dxb1(ix, iy, iz) = dxb(ix) + (vxb - vxbm) * dt END DO END DO END DO ! Evans and Hawley (ApJ, vol 332, p650, (1988)) ! constrained transport remap of magnetic fluxes CALL vx_by_flux DO iz = 1, nz DO iy = 0, ny DO ix = 1, nx ixm = ix - 1 by(ix, iy, iz) = by(ix, iy, iz) - flux(ix, iy, iz) + flux(ixm, iy, iz) END DO END DO END DO DO iz = 1, nz DO iy = 1, ny iym = iy - 1 DO ix = 0, nx bx(ix, iy, iz) = bx(ix, iy, iz) + flux(ix, iy, iz) - flux(ix, iym, iz) END DO END DO END DO CALL vx_bz_flux DO iz = 0, nz DO iy = 1, ny DO ix = 1, nx ixm = ix - 1 bz(ix, iy, iz) = bz(ix, iy, iz) - flux(ix, iy, iz) + flux(ixm, iy, iz) END DO END DO END DO DO iz = 1, nz izm = iz - 1 DO iy = 1, ny DO ix = 0, nx bx(ix, iy, iz) = bx(ix, iy, iz) + flux(ix, iy, iz) - flux(ix, iy, izm) END DO END DO END DO ! remap of mass + calculation of mass fluxes (dm) needed for later remaps CALL x_mass_flux ! calculates dm(0:nx, 0:ny+1) CALL dm_x_bcs ! need dm(0:nx+1, 0:ny+1) for velocity remap DO iz = 1, nz DO iy = 1, ny DO ix = 1, nx ixm = ix - 1 rho(ix, iy, iz) = (rho1(ix, iy, iz) * cv1(ix, iy, iz) & + dm(ixm, iy, iz) - dm(ix, iy, iz)) / cv2(ix, iy, iz) END DO END DO END DO ! remap specific energy density using mass coordinates CALL x_energy_flux DO iz = 1, nz DO iy = 1, ny DO ix = 1, nx ixm = ix - 1 energy(ix, iy, iz) = (energy(ix, iy, iz) * cv1(ix, iy, iz) & * rho1(ix, iy, iz) + flux(ixm, iy, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho(ix, iy, iz)) END DO END DO END DO ! redefine dxb1, cv1, cv2, dm and vx1 for velocity (vertex) cells ! in some of these calculations the flux variable is used as a ! temporary array DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = -1, nx+1 ixp = ix + 1 ! 
vertex density before remap rho_v(ix, iy, iz) = rho1(ix, iy, iz) * cv1(ix, iy, iz) & + rho1(ixp, iy , iz ) * cv1(ixp, iy , iz ) & + rho1(ix , iyp, iz ) * cv1(ix , iyp, iz ) & + rho1(ixp, iyp, iz ) * cv1(ixp, iyp, iz ) & + rho1(ix , iy , izp) * cv1(ix , iy , izp) & + rho1(ixp, iy , izp) * cv1(ixp, iy , izp) & + rho1(ix , iyp, izp) * cv1(ix , iyp, izp) & + rho1(ixp, iyp, izp) * cv1(ixp, iyp, izp) rho_v(ix, iy, iz) = rho_v(ix, iy, iz) & / (cv1(ix, iy , iz ) + cv1(ixp, iy , iz ) & + cv1(ix, iyp, iz ) + cv1(ixp, iyp, iz ) & + cv1(ix, iy , izp) + cv1(ixp, iy , izp) & + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp)) END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 flux(ix, iy, iz) = cv1(ix, iy, iz) + cv1(ixp, iy, iz) & + cv1(ix, iyp, iz ) + cv1(ixp, iyp, iz ) & + cv1(ix, iy , izp) + cv1(ixp, iy , izp) & + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp) END DO END DO END DO ! cv1 = vertex CV before remap cv1(0:nx, 0:ny, 0:nz) = flux(0:nx, 0:ny, 0:nz) / 8.0_num DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 flux(ix, iy, iz) = cv2(ix, iy, iz) + cv2(ixp, iy, iz) & + cv2(ix, iyp, iz ) + cv2(ixp, iyp, iz ) & + cv2(ix, iy , izp) + cv2(ixp, iy , izp) & + cv2(ix, iyp, izp) + cv2(ixp, iyp, izp) END DO END DO END DO ! cv2 = vertex CV after remap cv2(0:nx, 0:ny, 0:nz) = flux(0:nx, 0:ny, 0:nz) / 8.0_num DO iz = 0, nz DO iy = 0, ny DO ix = -2, nx+1 ixp = ix + 1 flux(ix, iy, iz) = (vx1(ix, iy, iz) + vx1(ixp, iy, iz)) / 2.0_num END DO END DO END DO ! vertex boundary velocity used in remap vx1(-2:nx+1, 0:ny, 0:nz) = flux(-2:nx+1, 0:ny, 0:nz) DO iz = 0, nz DO iy = 0, ny DO ix = -1, nx+1 ixm = ix - 1 ! 
dxb1 = width of vertex CV before remap dxb1(ix, iy, iz) = dxc(ix) + (vx1(ix, iy, iz) - vx1(ixm, iy, iz)) * dt END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = -1, nx ixp = ix + 1 flux(ix, iy, iz) = dm(ix, iy, iz) + dm(ixp, iy, iz) & + dm(ix, iyp, iz ) + dm(ixp, iyp, iz ) & + dm(ix, iy , izp) + dm(ixp, iy , izp) & + dm(ix, iyp, izp) + dm(ixp, iyp, izp) END DO END DO END DO ! mass flux out of vertex CV dm(-1:nx, 0:ny, 0:nz) = flux(-1:nx, 0:ny, 0:nz) / 8.0_num DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx ixm = ix - 1 ! vertex density after remap rho_v1(ix, iy, iz) = (rho_v(ix, iy, iz) * cv1(ix, iy, iz) & + dm(ixm, iy, iz) - dm(ix, iy, iz)) / cv2(ix, iy, iz) END DO END DO END DO CALL x_momy_flux DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx ixm = ix - 1 vy(ix, iy, iz) = (rho_v(ix, iy, iz) * vy(ix, iy, iz) & * cv1(ix, iy, iz) + flux(ixm, iy, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz)) END DO END DO END DO CALL x_momz_flux DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx ixm = ix - 1 vz(ix, iy, iz) = (rho_v(ix, iy, iz) * vz(ix, iy, iz) & * cv1(ix, iy, iz) + flux(ixm, iy, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz)) END DO END DO END DO CALL x_momx_flux DO iz = 0, nz DO iy = 0, ny DO ix = 0, nx ixm = ix - 1 vx(ix, iy, iz) = (rho_v(ix, iy, iz) * vx(ix, iy, iz) & * cv1(ix, iy, iz) + flux(ixm, iy, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz)) END DO END DO END DO CALL boundary_conditions DEALLOCATE (rho1, dm, cv2, flux, dxb1, rho_v, rho_v1) xpass = 0 END SUBROUTINE remap_x SUBROUTINE vx_by_flux REAL(num) :: v_advect, vad_p, vad_m REAL(num) :: db, dbxp, dbxp2, dbxm INTEGER :: ixp2 DO iz = 0, nz izm = iz - 1 DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = (vx1(ix, iy, iz) + vx1(ix, iy, izm)) / 2.0_num db = (dxb1(ix , iy, iz) + dxb1(ix , iyp, iz)) / 2.0_num dbxp = (dxb1(ixp , iy, iz) + dxb1(ixp , iyp, iz)) / 2.0_num 
dbxp2 = (dxb1(ixp2, iy, iz) + dxb1(ixp2, iyp, iz)) / 2.0_num dbxm = (dxb1(ixm , iy, iz) + dxb1(ixm , iyp, iz)) / 2.0_num w4 = by(ix , iy, iz) / db w5 = by(ixp, iy, iz) / dbxp flux(ix, iy, iz) = (MAX(0.0_num, v_advect) * w4 & + MIN(0.0_num, v_advect) * w5) * dt w1 = by(ixp , iy, iz) / dbxp - by(ix , iy, iz) / db w2 = by(ix , iy, iz) / db - by(ixm, iy, iz) / dbxm w3 = by(ixp2, iy, iz) / dbxp2 - by(ixp, iy, iz) / dbxp ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt / (db * vad_p + dbxp * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxc(ix) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxc(ixm) * vad_p + dxc(ixp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w6 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxb(ix) * vad_p + dxb(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = flux(ix, iy, iz) & + v_advect * dt * w6 * (1.0_num - w5) END DO END DO END DO END SUBROUTINE vx_by_flux SUBROUTINE vx_bz_flux REAL(num) :: v_advect, vad_p, vad_m REAL(num) :: db, dbxp, dbxp2, dbxm INTEGER :: ixp2 DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iym = iy - 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = (vx1(ix, iy, iz) + vx1(ix, iym, iz)) / 2.0_num db = (dxb1(ix , iy, iz) + dxb1(ix , iy, izp)) / 2.0_num dbxp = (dxb1(ixp , iy, iz) + dxb1(ixp , iy, izp)) / 2.0_num dbxp2 = (dxb1(ixp2, iy, iz) + dxb1(ixp2, iy, izp)) / 2.0_num dbxm = (dxb1(ixm , iy, iz) + dxb1(ixm , iy, izp)) / 2.0_num w4 = bz(ix , iy, iz) / db w5 = bz(ixp, iy, iz) / dbxp flux(ix, iy, iz) = (MAX(0.0_num, v_advect) * w4 & + MIN(0.0_num, v_advect) * w5) * dt w1 = bz(ixp , iy, iz) / dbxp - bz(ix , iy, iz) / db w2 = bz(ix , iy, iz) / db - bz(ixm, iy, iz) / dbxm w3 = 
bz(ixp2, iy, iz) / dbxp2 - bz(ixp, iy, iz) / dbxp ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization. See example in **SECTION 2** vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) ! This code is the vectorizable replacement for the code ! in **SECTION 2** w5 = ABS(v_advect) * dt / (db * vad_p + dbxp * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxc(ix) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxc(ixm) * vad_p + dxc(ixp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w6 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxb(ix) * vad_p + dxb(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = flux(ix, iy, iz) & + v_advect * dt * w6 * (1.0_num - w5) !!$ !**SECTION 2** !!$ IF (v_advect > 0.0) THEN !!$ w5 = ABS(v_advect) * dt / db !!$ w4 = (2.0_num - w5) * ABS(w1) / dxc(ix) & !!$ + (1.0_num + w5) * ABS(w2) / dxc(ixm) !!$ w4 = w4 / 6.0_num !!$ w4 = ABS(w1) / dxc(ix) !!$ w4 = w4 / 2.0_num !!$ w8 = 0.5_num * (SIGN(1.0_num, w1) + SIGN(1.0_num, w2)) !!$ w6 = w8 * MIN(ABS(w4)*dxb(ix), ABS(w1), ABS(w2)) !!$ flux(ix, iy, iz) = flux(ix, iy, iz) & !!$ + v_advect * dt * w6 * (1.0_num - w5) !!$ ELSE !!$ w5 = ABS(v_advect) * dt / dbxp !!$ w4 = (2.0_num - w5) * ABS(w1) / dxc(ix) & !!$ + (1.0_num + w5) * ABS(w3) / dxc(ixp) !!$ w4 = w4 / 6.0_num !!$ w8 = 0.5_num * (SIGN(1.0_num, w1) + SIGN(1.0_num, w3)) !!$ w6 = -w8 * MIN(ABS(w4)*dxb(ixp), ABS(w1), ABS(w3)) !!$ flux(ix, iy, iz) = flux(ix, iy, iz) & !!$ + v_advect * dt * w6 * (1.0_num - w5) !!$ END IF !!$ !**END SECTION 2** END DO END DO END DO END SUBROUTINE vx_bz_flux SUBROUTINE x_mass_flux REAL(num) :: v_advect, flux_rho, vad_p, vad_m INTEGER :: ixp2 DO iz = 0, nz+1 izm = iz - 1 DO iy = 0, ny+1 iym = iy - 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = 
(vx1(ix, iy, iz) + vx1(ix, iym, iz) & + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num dm(ix, iy, iz) = (MAX(0.0_num, v_advect) * rho(ix, iy, iz) & + MIN(0.0_num, v_advect) * rho(ixp, iy, iz)) * dt w1 = rho(ixp , iy, iz) - rho(ix , iy, iz) w2 = rho(ix , iy, iz) - rho(ixm, iy, iz) w3 = rho(ixp2, iy, iz) - rho(ixp, iy, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dxb1(ix, iy, iz) * vad_p + dxb1(ixp, iy, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxc(ix) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxc(ixm) * vad_p + dxc(ixp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w6 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxb(ix) * vad_p + dxb(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux_rho = v_advect * dt * w6 * (1.0_num - w5) dm(ix, iy, iz) = (flux_rho + dm(ix, iy, iz)) * dyb(iy) * dzb(iz) END DO END DO END DO END SUBROUTINE x_mass_flux SUBROUTINE x_energy_flux ! energy remap in mass coordinates REAL(num) :: v_advect, vad_p, vad_m INTEGER :: ixp2 DO iz = 0, nz izm = iz - 1 DO iy = 0, ny iym = iy - 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = (vx1(ix, iy, iz) + vx1(ix, iym, iz) & + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num w1 = energy(ixp , iy, iz) - energy(ix , iy, iz) w2 = energy(ix , iy, iz) - energy(ixm, iy, iz) w3 = energy(ixp2, iy, iz) - energy(ixp, iy, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! 
optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dxb1(ix, iy, iz) * vad_p + dxb1(ixp, iy, iz) * vad_m) w7 = energy(ix, iy, iz) * vad_p + energy(ixp, iy, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dyb(iy) / dzb(iz) & / (rho1(ix , iy, iz) * dxb1(ix , iy, iz) * vad_p & + rho1(ixp, iy, iz) * dxb1(ixp, iy, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxc(ix) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxc(ixm) * vad_p + dxc(ixp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxb(ix) * vad_p + dxb(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = dm(ix, iy, iz) * (w7 + w5 * (1.0_num - w6)) END DO END DO END DO END SUBROUTINE x_energy_flux SUBROUTINE x_momy_flux ! energy remap in mass coordinates REAL(num) :: v_advect, m, mp, ai, aip, dk REAL(num) :: vad_p, vad_m INTEGER :: ixp2 DO iz = 0, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = -1, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = vx1(ix, iy, iz) w1 = vy(ixp , iy, iz) - vy(ix , iy, iz) w2 = vy(ix , iy, iz) - vy(ixm, iy, iz) w3 = vy(ixp2, iy, iz) - vy(ixp, iy, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! 
optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dxb1(ix, iy, iz) * vad_p + dxb1(ixp, iy, iz) * vad_m) w7 = vy(ix, iy, iz) * vad_p + vy(ixp, iy, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dyc(iy) / dzc(iz) & / (rho_v(ix , iy, iz) * dxb1(ix , iy, iz) * vad_p & + rho_v(ixp, iy, iz) * dxb1(ixp, iy, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxb(ixp) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxb(ix) * vad_p + dxb(ixp2) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxc(ix) * vad_p + dxc(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6) END DO END DO END DO IF (rke) THEN DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx-1 ixm = ix - 1 ixp = ix + 1 m = rho_v1(ix, iy, iz) * cv2(ix, iy, iz) mp = rho_v1(ixp, iy, iz) * cv2(ixp, iy, iz) ai = (vy(ix, iy, iz) - flux(ixm, iy, iz)) * dm(ixm, iy, iz) / m & + (flux(ix, iy, iz) - vy(ix, iy, iz)) * dm(ix, iy, iz) / m aip = (vy(ixp, iy, iz) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp & + (flux(ixp, iy, iz) - vy(ixp, iy, iz)) * dm(ixp, iy, iz) / mp dk = (vy(ixp, iy, iz) - vy(ix, iy, iz)) * (flux(ix, iy, iz) & - 0.5_num * (vy(ixp, iy, iz) + vy(ix, iy, iz))) & + 0.5_num * ai * (flux(ix, iy, iz) - vy(ix, iy, iz)) & + 0.5_num * aip * (vy(ixp, iy, iz) - flux(ix, iy, iz)) dk = dk * dm(ix, iy, iz) / 4.0_num delta_ke(ixp, iy , iz ) = delta_ke(ixp, iy , iz ) + dk delta_ke(ixp, iyp, iz ) = delta_ke(ixp, iyp, iz ) + dk delta_ke(ixp, iy , izp) = delta_ke(ixp, iy , izp) + dk delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk END DO END DO END DO END IF flux(-1:nx, 0:ny, 0:nz) = flux(-1:nx, 0:ny, 0:nz) * dm(-1:nx, 0:ny, 0:nz) END SUBROUTINE x_momy_flux SUBROUTINE x_momz_flux ! 
energy remap in mass coordinates REAL(num) :: v_advect, m, mp, ai, aip, dk REAL(num) :: vad_p, vad_m INTEGER :: ixp2 DO iz = 0, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = -1, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = vx1(ix, iy, iz) w1 = vz(ixp , iy, iz) - vz(ix , iy, iz) w2 = vz(ix , iy, iz) - vz(ixm, iy, iz) w3 = vz(ixp2, iy, iz) - vz(ixp, iy, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dxb1(ix, iy, iz) * vad_p + dxb1(ixp, iy, iz) * vad_m) w7 = vz(ix, iy, iz) * vad_p + vz(ixp, iy, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dyc(iy) / dzc(iz) & / (rho_v(ix , iy, iz) * dxb1(ix , iy, iz) * vad_p & + rho_v(ixp, iy, iz) * dxb1(ixp, iy, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxb(ixp) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxb(ix) * vad_p + dxb(ixp2) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxc(ix) * vad_p + dxc(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6) END DO END DO END DO IF (rke) THEN DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx-1 ixm = ix - 1 ixp = ix + 1 m = rho_v1(ix, iy, iz) * cv2(ix, iy, iz) mp = rho_v1(ixp, iy, iz) * cv2(ixp, iy, iz) ai = (vz(ix, iy, iz) - flux(ixm, iy, iz)) * dm(ixm, iy, iz) / m & + (flux(ix, iy, iz) - vz(ix, iy, iz)) * dm(ix, iy, iz) / m aip = (vz(ixp, iy, iz) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp & + (flux(ixp, iy, iz) - vz(ixp, iy, iz)) * dm(ixp, iy, iz) / mp dk = (vz(ixp, iy, iz) - vz(ix, iy, iz)) * (flux(ix, iy, iz) & - 0.5_num * (vz(ixp, iy, iz) + vz(ix, iy, iz))) & + 0.5_num * ai * (flux(ix, iy, iz) - vz(ix, iy, iz)) & + 0.5_num * aip 
* (vz(ixp, iy, iz) - flux(ix, iy, iz)) dk = dk * dm(ix, iy, iz) / 4.0_num delta_ke(ixp, iy , iz ) = delta_ke(ixp, iy , iz ) + dk delta_ke(ixp, iyp, iz ) = delta_ke(ixp, iyp, iz ) + dk delta_ke(ixp, iy , izp) = delta_ke(ixp, iy , izp) + dk delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk END DO END DO END DO END IF flux(-1:nx, 0:ny, 0:nz) = flux(-1:nx, 0:ny, 0:nz) * dm(-1:nx, 0:ny, 0:nz) END SUBROUTINE x_momz_flux SUBROUTINE x_momx_flux REAL(num) :: v_advect, m, mp, ai, aip, dk REAL(num) :: vad_p, vad_m INTEGER :: ixp2 DO iz = 0, nz DO iy = 0, ny !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = -1, nx ixm = ix - 1 ixp = ix + 1 ixp2 = ix + 2 v_advect = vx1(ix, iy, iz) w1 = vx(ixp , iy, iz) - vx(ix , iy, iz) w2 = vx(ix , iy, iz) - vx(ixm, iy, iz) w3 = vx(ixp2, iy, iz) - vx(ixp, iy, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dxb1(ix, iy, iz) * vad_p + dxb1(ixp, iy, iz) * vad_m) w7 = vx(ix, iy, iz) * vad_p + vx(ixp, iy, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dyc(iy) / dzc(iz) & / (rho_v(ix , iy, iz) * dxb1(ix , iy, iz) * vad_p & + rho_v(ixp, iy, iz) * dxb1(ixp, iy, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dxb(ixp) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dxb(ix) * vad_p + dxb(ixp2) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dxc(ix) * vad_p + dxc(ixp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6) END DO END DO END DO IF (rke) THEN DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx-1 ixm = ix - 1 ixp = ix + 1 m = rho_v1(ix, iy, iz) * cv2(ix, iy, iz) mp = rho_v1(ixp, iy, iz) * cv2(ixp, iy, iz) ai = (vx(ix, iy, iz) - 
flux(ixm, iy, iz)) * dm(ixm, iy, iz) / m & + (flux(ix, iy, iz) - vx(ix, iy, iz)) * dm(ix, iy, iz) / m aip = (vx(ixp, iy, iz) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp & + (flux(ixp, iy, iz) - vx(ixp, iy, iz)) * dm(ixp, iy, iz) / mp dk = (vx(ixp, iy, iz) - vx(ix, iy, iz)) * (flux(ix, iy, iz) & - 0.5_num * (vx(ixp, iy, iz) + vx(ix, iy, iz))) & + 0.5_num * ai * (flux(ix, iy, iz) - vx(ix, iy, iz)) & + 0.5_num * aip * (vx(ixp, iy, iz) - flux(ix, iy, iz)) dk = dk * dm(ix, iy, iz) / 4.0_num delta_ke(ixp, iy , iz ) = delta_ke(ixp, iy , iz ) + dk delta_ke(ixp, iyp, iz ) = delta_ke(ixp, iyp, iz ) + dk delta_ke(ixp, iy , izp) = delta_ke(ixp, iy , izp) + dk delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk END DO END DO END DO END IF flux(-1:nx, 0:ny, 0:nz) = flux(-1:nx, 0:ny, 0:nz) * dm(-1:nx, 0:ny, 0:nz) END SUBROUTINE x_momx_flux SUBROUTINE dm_x_bcs CALL MPI_SENDRECV(dm(1, 0:ny+1, 0:nz+1), (ny+2)*(nz+2), mpireal, & left, tag, dm(nx+1, 0:ny+1, 0:nz+1), (ny+2)*(nz+2), mpireal, & right, tag, comm, status, errcode) IF (right == MPI_PROC_NULL) & dm(nx+1, 0:ny+1, 0:nz+1) = dm(nx, 0:ny+1, 0:nz+1) CALL MPI_SENDRECV(dm(nx-1, 0:ny+1, 0:nz+1), (ny+2)*(nz+2), mpireal, & right, tag, dm(-1, 0:ny+1, 0:nz+1), (ny+2)*(nz+2), mpireal, & left, tag, comm, status, errcode) IF (left == MPI_PROC_NULL) & dm(-1, 0:ny+1, 0:nz+1) = dm(0, 0:ny+1, 0:nz+1) END SUBROUTINE dm_x_bcs END MODULE xremap Lare3d/src/core/yremap.f90000644 000765 000024 00000064720 11430520723 015626 0ustar00Tonystaff000000 000000 !------------------------------------------------------------------------- ! mass coordinate based Van Leer limited remap. ! See Bram van Leer, JCP, vol 135, p229, (1997) ! Now rewritten to allow compiler vectorizing of loops ! 
See notes in code !------------------------------------------------------------------------- MODULE yremap USE shared_data; USE boundary IMPLICIT NONE PRIVATE PUBLIC :: remap_y REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho1, dm, cv2, flux, dyb1 REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho_v, rho_v1 CONTAINS SUBROUTINE remap_y ! remap onto original Eulerian grid REAL(num) :: vxb, vxbm, vyb, vybm, vzb, vzbm, dv ALLOCATE (rho1(-1:nx+2, -1:ny+2, -1:nz+2), dm(-1:nx+2, -1:ny+2, -1:nz+2), & cv2(-1:nx+2, -1:ny+2, -1:nz+2), flux(-1:nx+2, -2:ny+2, -1:nz+2), & dyb1(-1:nx+2, -1:ny+2, -1:nz+2), rho_v(-1:nx+2, -1:ny+2, -1:nz+2), & rho_v1(-1:nx+2, -1:ny+2, -1:nz+2)) dm = 0.0_num rho1 = rho ! store initial density in rho1 DO iz = -1, nz+2 izm = iz - 1 DO iy = -1, ny+2 iym = iy - 1 DO ix = -1, nx+2 ixm = ix - 1 ! vx at Sx(i, j, k) vxb = (vx1(ix, iy, iz) + vx1(ix, iym, iz) & + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num ! vx at Sx(i-1, j, k) vxbm = (vx1(ixm, iy, iz) + vx1(ixm, iym, iz) & + vx1(ixm, iy, izm) + vx1(ixm, iym, izm)) / 4.0_num ! vy at Sy(i, j, k) vyb = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num ! vy at Sy(i, j-1, k) vybm = (vy1(ix, iym, iz) + vy1(ixm, iym, iz) & + vy1(ix, iym, izm) + vy1(ixm, iym, izm)) / 4.0_num ! vz at Sz(i, j, k) vzb = (vz1(ix, iy, iz) + vz1(ixm, iy, iz) & + vz1(ix, iym, iz) + vz1(ixm, iym, iz)) / 4.0_num ! vz at Sz(i, j, k-1) vzbm = (vz1(ix, iy, izm) + vz1(ixm, iy, izm) & + vz1(ix, iym, izm) + vz1(ixm, iym, izm)) / 4.0_num dv = (REAL(xpass, num) * (vxb - vxbm) / dxb(ix) & + REAL(zpass, num) * (vzb - vzbm) / dzb(iz) & + (vyb - vybm) / dyb(iy)) * dt ! control volume before remap cv1(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv) dv = (REAL(xpass, num) * (vxb - vxbm) / dxb(ix) & + REAL(zpass, num) * (vzb - vzbm) / dzb(iz)) * dt ! control volume after remap cv2(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv) ! dyb before remap dyb1(ix, iy, iz) = dyb(iy) + (vyb - vybm) * dt END DO END DO END DO ! 
Evans and Hawley (ApJ, vol 332, p650, (1988)) ! constrained transport remap of magnetic fluxes CALL vy_bx_flux DO iz = 1, nz DO iy = 1, ny iym = iy - 1 DO ix = 0, nx bx(ix, iy, iz) = bx(ix, iy, iz) - flux(ix, iy, iz) + flux(ix, iym, iz) END DO END DO END DO DO iz = 1, nz DO iy = 0, ny DO ix = 1, nx ixm = ix - 1 by(ix, iy, iz) = by(ix, iy, iz) + flux(ix, iy, iz) - flux(ixm, iy, iz) END DO END DO END DO CALL vy_bz_flux DO iz = 0, nz DO iy = 1, ny iym = iy - 1 DO ix = 1, nx bz(ix, iy, iz) = bz(ix, iy, iz) - flux(ix, iy, iz) + flux(ix, iym, iz) END DO END DO END DO DO iz = 1, nz izm = iz - 1 DO iy = 0, ny DO ix = 1, nx by(ix, iy, iz) = by(ix, iy, iz) + flux(ix, iy, iz) - flux(ix, iy, izm) END DO END DO END DO ! remap of mass + calculation of mass fluxes (dm) needed for later remaps CALL y_mass_flux ! calculates dm(0:nx, 0:ny+1) CALL dm_y_bcs ! need dm(0:nx+1, 0:ny+1) for velocity remap DO iz = 1, nz DO iy = 1, ny iym = iy - 1 DO ix = 1, nx rho(ix, iy, iz) = (rho1(ix, iy, iz) * cv1(ix, iy, iz) & + dm(ix, iym, iz) - dm(ix, iy, iz)) / cv2(ix, iy, iz) END DO END DO END DO ! remap specific energy density using mass coordinates CALL y_energy_flux DO iz = 1, nz DO iy = 1, ny iym = iy - 1 DO ix = 1, nx energy(ix, iy, iz) = (energy(ix, iy, iz) * cv1(ix, iy, iz) & * rho1(ix, iy, iz) + flux(ix, iym, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho(ix, iy, iz)) END DO END DO END DO ! redefine dyb1, cv1, cv2, dm and vy1 for velocity (vertex) cells ! in some of these calculations the flux variable is used as a ! temporary array DO iz = 0, nz izp = iz + 1 DO iy = -1, ny+1 iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 ! 
vertex density before remap rho_v(ix, iy, iz) = rho1(ix, iy, iz) * cv1(ix, iy, iz) & + rho1(ixp, iy , iz ) * cv1(ixp, iy , iz ) & + rho1(ix , iyp, iz ) * cv1(ix , iyp, iz ) & + rho1(ixp, iyp, iz ) * cv1(ixp, iyp, iz ) & + rho1(ix , iy , izp) * cv1(ix , iy , izp) & + rho1(ixp, iy , izp) * cv1(ixp, iy , izp) & + rho1(ix , iyp, izp) * cv1(ix , iyp, izp) & + rho1(ixp, iyp, izp) * cv1(ixp, iyp, izp) rho_v(ix, iy, iz) = rho_v(ix, iy, iz) & / (cv1(ix, iy , iz ) + cv1(ixp, iy , iz ) & + cv1(ix, iyp, iz ) + cv1(ixp, iyp, iz ) & + cv1(ix, iy , izp) + cv1(ixp, iy , izp) & + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp)) END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 flux(ix, iy, iz) = cv1(ix, iy, iz) + cv1(ixp, iy, iz) & + cv1(ix, iyp, iz ) + cv1(ixp, iyp, iz ) & + cv1(ix, iy , izp) + cv1(ixp, iy , izp) & + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp) END DO END DO END DO ! cv1 = vertex CV before remap cv1(0:nx, 0:ny, 0:nz) = flux(0:nx, 0:ny, 0:nz) / 8.0_num DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 flux(ix, iy, iz) = cv2(ix, iy, iz) + cv2(ixp, iy, iz) & + cv2(ix, iyp, iz ) + cv2(ixp, iyp, iz ) & + cv2(ix, iy , izp) + cv2(ixp, iy , izp) & + cv2(ix, iyp, izp) + cv2(ixp, iyp, izp) END DO END DO END DO ! cv2 = vertex CV after remap cv2(0:nx, 0:ny, 0:nz) = flux(0:nx, 0:ny, 0:nz) / 8.0_num DO iz = 0, nz DO iy = -2, ny+1 iyp = iy + 1 DO ix = 0, nx flux(ix, iy, iz) = (vy1(ix, iy, iz) + vy1(ix, iyp, iz)) / 2.0_num END DO END DO END DO ! vertex boundary velocity used in remap vy1(0:nx, -2:ny+1, 0:nz) = flux(0:nx, -2:ny+1, 0:nz) DO iz = 0, nz DO iy = -1, ny+1 iym = iy - 1 DO ix = 0, nx ! 
dyb1 = width of vertex CV before remap dyb1(ix, iy, iz) = dyc(iy) + (vy1(ix, iy, iz) - vy1(ix, iym, iz)) * dt END DO END DO END DO DO iz = 0, nz izp = iz + 1 DO iy = -1, ny iyp = iy + 1 DO ix = 0, nx ixp = ix + 1 flux(ix, iy, iz) = dm(ix, iy, iz) + dm(ixp, iy, iz) & + dm(ix, iyp, iz ) + dm(ixp, iyp, iz ) & + dm(ix, iy , izp) + dm(ixp, iy , izp) & + dm(ix, iyp, izp) + dm(ixp, iyp, izp) END DO END DO END DO ! mass flux out of vertex CV dm(0:nx, -1:ny, 0:nz) = flux(0:nx, -1:ny, 0:nz) / 8.0_num DO iz = 0, nz DO iy = 0, ny iym = iy - 1 DO ix = 0, nx ! vertex density after remap rho_v1(ix, iy, iz) = (rho_v(ix, iy, iz) * cv1(ix, iy, iz) & + dm(ix, iym, iz) - dm(ix, iy, iz)) / cv2(ix, iy, iz) END DO END DO END DO CALL y_momz_flux DO iz = 0, nz DO iy = 0, ny iym = iy - 1 DO ix = 0, nx vz(ix, iy, iz) = (rho_v(ix, iy, iz) * vz(ix, iy, iz) & * cv1(ix, iy, iz) + flux(ix, iym, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz)) END DO END DO END DO CALL y_momx_flux DO iz = 0, nz DO iy = 0, ny iym = iy - 1 DO ix = 0, nx vx(ix, iy, iz) = (rho_v(ix, iy, iz) * vx(ix, iy, iz) & * cv1(ix, iy, iz) + flux(ix, iym, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz)) END DO END DO END DO CALL y_momy_flux DO iz = 0, nz DO iy = 0, ny iym = iy - 1 DO ix = 0, nx vy(ix, iy, iz) = (rho_v(ix, iy, iz) * vy(ix, iy, iz) & * cv1(ix, iy, iz) + flux(ix, iym, iz) - flux(ix, iy, iz)) & / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz)) END DO END DO END DO CALL boundary_conditions DEALLOCATE (rho1, dm, cv2, flux, dyb1, rho_v, rho_v1) ypass = 0 END SUBROUTINE remap_y SUBROUTINE vy_bx_flux REAL(num) :: v_advect, vad_p, vad_m REAL(num) :: db, dbyp, dbyp2, dbym INTEGER :: iyp2 DO iz = 0, nz izm = iz - 1 DO iy = 0, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 v_advect = (vy1(ix, iy, iz) + vy1(ix, iy, izm)) / 2.0_num db = (dyb1(ix, iy , iz) + dyb1(ixp, iy , iz)) / 2.0_num dbyp = (dyb1(ix, iyp , iz) + dyb1(ixp, iyp , iz)) / 2.0_num 
dbyp2 = (dyb1(ix, iyp2, iz) + dyb1(ixp, iyp2, iz)) / 2.0_num dbym = (dyb1(ix, iym , iz) + dyb1(ixp, iym , iz)) / 2.0_num w4 = bx(ix, iy , iz) / db w5 = bx(ix, iyp, iz) / dbyp flux(ix, iy, iz) = (MAX(0.0_num, v_advect) * w4 & + MIN(0.0_num, v_advect) * w5) * dt w1 = bx(ix, iyp , iz) / dbyp - bx(ix, iy , iz) / db w2 = bx(ix, iy , iz) / db - bx(ix, iym, iz) / dbym w3 = bx(ix, iyp2, iz) / dbyp2 - bx(ix, iyp, iz) / dbyp ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt / (db * vad_p + dbyp * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyc(iy) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyc(iym) * vad_p + dyc(iyp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w6 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyb(iy) * vad_p + dyb(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = flux(ix, iy, iz) & + v_advect * dt * w6 * (1.0_num - w5) END DO END DO END DO END SUBROUTINE vy_bx_flux SUBROUTINE vy_bz_flux REAL(num) :: v_advect, vad_p, vad_m REAL(num) :: db, dbyp, dbyp2, dbym INTEGER :: iyp2 DO iz = 0, nz izp = iz + 1 DO iy = 0, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixm = ix - 1 v_advect = (vy1(ix, iy, iz) + vy1(ixm, iy, iz)) / 2.0_num db = (dyb1(ix, iy , iz) + dyb1(ix, iy , izp)) / 2.0_num dbyp = (dyb1(ix, iyp , iz) + dyb1(ix, iyp , izp)) / 2.0_num dbyp2 = (dyb1(ix, iyp2, iz) + dyb1(ix, iyp2, izp)) / 2.0_num dbym = (dyb1(ix, iym , iz) + dyb1(ix, iym , izp)) / 2.0_num w4 = bz(ix, iy , iz) / db w5 = bz(ix, iyp, iz) / dbyp flux(ix, iy, iz) = (MAX(0.0_num, v_advect) * w4 & + MIN(0.0_num, v_advect) * w5) * dt w1 = bz(ix, iyp , iz) / dbyp - bz(ix, iy , iz) / db w2 = bz(ix, iy , iz) / db - bz(ix, iym, iz) / dbym w3 = bz(ix, 
iyp2, iz) / dbyp2 - bz(ix, iyp, iz) / dbyp ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt / (db * vad_p + dbyp * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyc(iy) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyc(iym) * vad_p + dyc(iyp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w6 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyb(iy) * vad_p + dyb(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = flux(ix, iy, iz) & + v_advect * dt * w6 * (1.0_num - w5) END DO END DO END DO END SUBROUTINE vy_bz_flux SUBROUTINE y_mass_flux REAL(num) :: v_advect, flux_rho, vad_p, vad_m INTEGER :: iyp2 DO iz = 0, nz+1 izm = iz - 1 DO iy = 0, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx+1 ixm = ix - 1 v_advect = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num dm(ix, iy, iz) = (MAX(0.0_num, v_advect) * rho(ix, iy, iz) & + MIN(0.0_num, v_advect) * rho(ix, iyp, iz)) * dt w1 = rho(ix, iyp , iz) - rho(ix, iy , iz) w2 = rho(ix, iy , iz) - rho(ix, iym, iz) w3 = rho(ix, iyp2, iz) - rho(ix, iyp, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! 
optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dyb1(ix, iy, iz) * vad_p + dyb1(ix, iyp, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyc(iy) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyc(iym) * vad_p + dyc(iyp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w6 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyb(iy) * vad_p + dyb(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux_rho = v_advect * dt * w6 * (1.0_num - w5) dm(ix, iy, iz) = (flux_rho + dm(ix, iy, iz)) * dxb(ix) * dzb(iz) END DO END DO END DO END SUBROUTINE y_mass_flux SUBROUTINE y_energy_flux ! energy remap in mass coordinates REAL(num) :: v_advect, vad_p, vad_m INTEGER :: iyp2 DO iz = 0, nz izm = iz - 1 DO iy = 0, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixm = ix - 1 v_advect = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) & + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num w1 = energy(ix, iyp , iz) - energy(ix, iy , iz) w2 = energy(ix, iy , iz) - energy(ix, iym, iz) w3 = energy(ix, iyp2, iz) - energy(ix, iyp, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! 
optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dyb1(ix, iy, iz) * vad_p + dyb1(ix, iyp, iz) * vad_m) w7 = energy(ix, iy, iz) * vad_p + energy(ix, iyp, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dxb(ix) / dzb(iz) & / (rho1(ix, iy , iz) * dyb1(ix, iy , iz) * vad_p & + rho1(ix, iyp, iz) * dyb1(ix, iyp, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyc(iy) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyc(iym) * vad_p + dyc(iyp) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyb(iy) * vad_p + dyb(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = dm(ix, iy, iz) * (w7 + w5 * (1.0_num - w6)) END DO END DO END DO END SUBROUTINE y_energy_flux SUBROUTINE y_momx_flux ! energy remap in mass coordinates REAL(num) :: v_advect, m, mp, ai, aip, dk REAL(num) :: vad_p, vad_m INTEGER :: iyp2 DO iz = 0, nz DO iy = -1, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx v_advect = vy1(ix, iy, iz) w1 = vx(ix, iyp , iz) - vx(ix, iy , iz) w2 = vx(ix, iy , iz) - vx(ix, iym, iz) w3 = vx(ix, iyp2, iz) - vx(ix, iyp, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! 
optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dyb1(ix, iy, iz) * vad_p + dyb1(ix, iyp, iz) * vad_m) w7 = vx(ix, iy, iz) * vad_p + vx(ix, iyp, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dxc(ix) / dzc(iz) & / (rho_v(ix, iy , iz) * dyb1(ix, iy , iz) * vad_p & + rho_v(ix, iyp, iz) * dyb1(ix, iyp, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyb(iyp) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyb(iy) * vad_p + dyb(iyp2) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyc(iy) * vad_p + dyc(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6) END DO END DO END DO IF (rke) THEN DO iz = 0, nz izp = iz + 1 DO iy = 0, ny-1 iym = iy - 1 iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 m = rho_v1(ix, iy, iz) * cv2(ix, iy, iz) mp = rho_v1(ix, iyp, iz) * cv2(ix, iyp, iz) ai = (vx(ix, iy, iz) - flux(ix, iym, iz)) * dm(ix, iym, iz) / m & + (flux(ix, iy, iz) - vx(ix, iy, iz)) * dm(ix, iy, iz) / m aip = (vx(ix, iyp, iz) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp & + (flux(ix, iyp, iz) - vx(ix, iyp, iz)) * dm(ix, iyp, iz) / mp dk = (vx(ix, iyp, iz) - vx(ix, iy, iz)) * (flux(ix, iy, iz) & - 0.5_num * (vx(ix, iyp, iz) + vx(ix, iy, iz))) & + 0.5_num * ai * (flux(ix, iy, iz) - vx(ix, iy, iz)) & + 0.5_num * aip * (vx(ix, iyp, iz) - flux(ix, iy, iz)) dk = dk * dm(ix, iy, iz) / 4.0_num delta_ke(ix , iyp, iz ) = delta_ke(ix , iyp, iz ) + dk delta_ke(ixp, iyp, iz ) = delta_ke(ixp, iyp, iz ) + dk delta_ke(ix , iyp, izp) = delta_ke(ix , iyp, izp) + dk delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk END DO END DO END DO END IF flux(0:nx, -1:ny, 0:nz) = flux(0:nx, -1:ny, 0:nz) * dm(0:nx, -1:ny, 0:nz) END SUBROUTINE y_momx_flux SUBROUTINE y_momz_flux ! 
energy remap in mass coordinates REAL(num) :: v_advect, m, mp, ai, aip, dk REAL(num) :: vad_p, vad_m INTEGER :: iyp2 DO iz = 0, nz DO iy = -1, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx v_advect = vy1(ix, iy, iz) w1 = vz(ix, iyp , iz) - vz(ix, iy , iz) w2 = vz(ix, iy , iz) - vz(ix, iym, iz) w3 = vz(ix, iyp2, iz) - vz(ix, iyp, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dyb1(ix, iy, iz) * vad_p + dyb1(ix, iyp, iz) * vad_m) w7 = vz(ix, iy, iz) * vad_p + vz(ix, iyp, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dxc(ix) / dzc(iz) & / (rho_v(ix, iy , iz) * dyb1(ix, iy , iz) * vad_p & + rho_v(ix, iyp, iz) * dyb1(ix, iyp, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyb(iyp) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyb(iy) * vad_p + dyb(iyp2) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyc(iy) * vad_p + dyc(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6) END DO END DO END DO IF (rke) THEN DO iz = 0, nz izp = iz + 1 DO iy = 0, ny-1 iym = iy - 1 iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 m = rho_v1(ix, iy, iz) * cv2(ix, iy, iz) mp = rho_v1(ix, iyp, iz) * cv2(ix, iyp, iz) ai = (vz(ix, iy, iz) - flux(ix, iym, iz)) * dm(ix, iym, iz) / m & + (flux(ix, iy, iz) - vz(ix, iy, iz)) * dm(ix, iy, iz) / m aip = (vz(ix, iyp, iz) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp & + (flux(ix, iyp, iz) - vz(ix, iyp, iz)) * dm(ix, iyp, iz) / mp dk = (vz(ix, iyp, iz) - vz(ix, iy, iz)) * (flux(ix, iy, iz) & - 0.5_num * (vz(ix, iyp, iz) + vz(ix, iy, iz))) & + 0.5_num * ai * (flux(ix, iy, iz) - vz(ix, iy, iz)) & + 0.5_num * aip 
* (vz(ix, iyp, iz) - flux(ix, iy, iz)) dk = dk * dm(ix, iy, iz) / 4.0_num delta_ke(ix , iyp, iz ) = delta_ke(ix , iyp, iz ) + dk delta_ke(ixp, iyp, iz ) = delta_ke(ixp, iyp, iz ) + dk delta_ke(ix , iyp, izp) = delta_ke(ix , iyp, izp) + dk delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk END DO END DO END DO END IF flux(0:nx, -1:ny, 0:nz) = flux(0:nx, -1:ny, 0:nz) * dm(0:nx, -1:ny, 0:nz) END SUBROUTINE y_momz_flux SUBROUTINE y_momy_flux REAL(num) :: v_advect, m, mp, ai, aip, dk REAL(num) :: vad_p, vad_m INTEGER :: iyp2 DO iz = 0, nz DO iy = -1, ny iym = iy - 1 iyp = iy + 1 iyp2 = iy + 2 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx v_advect = vy1(ix, iy, iz) w1 = vy(ix, iyp , iz) - vy(ix, iy , iz) w2 = vy(ix, iy , iz) - vy(ix, iym, iz) w3 = vy(ix, iyp2, iz) - vy(ix, iyp, iz) ! vad_p and vad_m are logical switches which determine v_advect>=0 ! and v_advect<0 respectively. It's written this way to allow vector ! optimization vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num) vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num) w5 = ABS(v_advect) * dt & / (dyb1(ix, iy, iz) * vad_p + dyb1(ix, iyp, iz) * vad_m) w7 = vy(ix, iy, iz) * vad_p + vy(ix, iyp, iz) * vad_m w6 = ABS(dm(ix, iy, iz)) / dxc(ix) / dzc(iz) & / (rho_v(ix, iy , iz) * dyb1(ix, iy , iz) * vad_p & + rho_v(ix, iyp, iz) * dyb1(ix, iyp, iz) * vad_m) w4 = (2.0_num - w5) * ABS(w1) / dyb(iyp) & + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) & / (dyb(iy) * vad_p + dyb(iyp2) * vad_m) w4 = w4 / 6.0_num w8 = 0.5_num * (SIGN(1.0_num, w1) & + SIGN(1.0_num, w2 * vad_p + w3 * vad_m)) w5 = SIGN(1.0_num, v_advect) * w8 & * MIN(ABS(w4) * (dyc(iy) * vad_p + dyc(iyp) * vad_m), & ABS(w1), ABS(w2 * vad_p + w3 * vad_m)) flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6) END DO END DO END DO IF (rke) THEN DO iz = 0, nz izp = iz + 1 DO iy = 0, ny-1 iym = iy - 1 iyp = iy + 1 !DEC$ IVDEP !DEC$ VECTOR ALWAYS DO ix = 0, nx ixp = ix + 1 m = rho_v1(ix, iy, iz) * cv2(ix, iy, iz) mp = rho_v1(ix, iyp, iz) * cv2(ix, iyp, iz) ai = (vy(ix, iy, iz) - 
flux(ix, iym, iz)) * dm(ix, iym, iz) / m & + (flux(ix, iy, iz) - vy(ix, iy, iz)) * dm(ix, iy, iz) / m aip = (vy(ix, iyp, iz) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp & + (flux(ix, iyp, iz) - vy(ix, iyp, iz)) * dm(ix, iyp, iz) / mp dk = (vy(ix, iyp, iz) - vy(ix, iy, iz)) * (flux(ix, iy, iz) & - 0.5_num * (vy(ix, iyp, iz) + vy(ix, iy, iz))) & + 0.5_num * ai * (flux(ix, iy, iz) - vy(ix, iy, iz)) & + 0.5_num * aip * (vy(ix, iyp, iz) - flux(ix, iy, iz)) dk = dk * dm(ix, iy, iz) / 4.0_num delta_ke(ix , iyp, iz ) = delta_ke(ix , iyp, iz ) + dk delta_ke(ixp, iyp, iz ) = delta_ke(ixp, iyp, iz ) + dk delta_ke(ix , iyp, izp) = delta_ke(ix , iyp, izp) + dk delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk END DO END DO END DO END IF flux(0:nx, -1:ny, 0:nz) = flux(0:nx, -1:ny, 0:nz) * dm(0:nx, -1:ny, 0:nz) END SUBROUTINE y_momy_flux SUBROUTINE dm_y_bcs CALL MPI_SENDRECV(dm(0:nx+1, 1, 0:nz+1), (nx+2)*(nz+2), mpireal, & down, tag, dm(0:nx+1, ny+1, 0:nz+1), (nx+2)*(nz+2), mpireal, & up, tag, comm, status, errcode) IF (up == MPI_PROC_NULL) & dm(0:nx+1, ny+1, 0:nz+1) = dm(0:nx+1, ny, 0:nz+1) CALL MPI_SENDRECV(dm(0:nx+1, ny-1, 0:nz+1), (nx+2)*(nz+2), mpireal, & up, tag, dm(0:nx+1, -1, 0:nz+1), (nx+2)*(nz+2), mpireal, & down, tag, comm, status, errcode) IF (down == MPI_PROC_NULL) & dm(0:nx+1, -1, 0:nz+1) = dm(0:nx+1, 0, 0:nz+1) END SUBROUTINE dm_y_bcs END MODULE yremap Lare3d/src/core/zremap.f90000644 000765 000024 00000064637 11430520723 015636 0ustar00Tonystaff000000 000000 !------------------------------------------------------------------------- ! mass coordinate based Van Leer limited remap. ! See Bram van Leer, JCP, vol 135, p229, (1997) ! Now rewritten to allow compiler vectorizing of loops ! 
See notes in code
!-------------------------------------------------------------------------

MODULE zremap

  USE shared_data; USE boundary

  IMPLICIT NONE

  PRIVATE

  PUBLIC :: remap_z

  ! Work arrays (allocated/freed per call to remap_z):
  !   rho1   - density before the remap
  !   dm     - mass flux through cell (later vertex) faces in z
  !   cv2    - control volume after the remap
  !   flux   - generic face flux / temporary array
  !   dzb1   - z cell width before the remap
  !   rho_v  - vertex (velocity cell) density before remap
  !   rho_v1 - vertex density after remap
  REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho1, dm, cv2, flux, dzb1
  REAL(num), DIMENSION(:, :, :), ALLOCATABLE :: rho_v, rho_v1

CONTAINS

  SUBROUTINE remap_z ! remap onto original Eulerian grid

    REAL(num) :: vxb, vxbm, vyb, vybm, vzb, vzbm, dv

    ALLOCATE (rho1(-1:nx+2, -1:ny+2, -1:nz+2), dm(-1:nx+2, -1:ny+2, -1:nz+2), &
        cv2(-1:nx+2, -1:ny+2, -1:nz+2), flux(-1:nx+2, -1:ny+2, -2:nz+2), &
        dzb1(-1:nx+2, -1:ny+2, -1:nz+2), rho_v(-1:nx+2, -1:ny+2, -1:nz+2), &
        rho_v1(-1:nx+2, -1:ny+2, -1:nz+2))

    dm = 0.0_num
    rho1 = rho ! store initial density in rho1

    ! Build the pre/post-remap control volumes and pre-remap z cell widths
    ! from the face-averaged Lagrangian step velocities.
    DO iz = -1, nz+2
      izm = iz - 1
      DO iy = -1, ny+2
        iym = iy - 1
        DO ix = -1, nx+2
          ixm = ix - 1

          ! vx at Sx(i, j, k)
          vxb = (vx1(ix, iy, iz) + vx1(ix, iym, iz) &
              + vx1(ix, iy, izm) + vx1(ix, iym, izm)) / 4.0_num

          ! vx at Sx(i-1, j, k)
          vxbm = (vx1(ixm, iy, iz) + vx1(ixm, iym, iz) &
              + vx1(ixm, iy, izm) + vx1(ixm, iym, izm)) / 4.0_num

          ! vy at Sy(i, j, k)
          vyb = (vy1(ix, iy, iz) + vy1(ixm, iy, iz) &
              + vy1(ix, iy, izm) + vy1(ixm, iy, izm)) / 4.0_num

          ! vy at Sy(i, j-1, k)
          vybm = (vy1(ix, iym, iz) + vy1(ixm, iym, iz) &
              + vy1(ix, iym, izm) + vy1(ixm, iym, izm)) / 4.0_num

          ! vz at Sz(i, j, k)
          vzb = (vz1(ix, iy, iz) + vz1(ixm, iy, iz) &
              + vz1(ix, iym, iz) + vz1(ixm, iym, iz)) / 4.0_num

          ! vz at Sz(i, j, k-1)
          vzbm = (vz1(ix, iy, izm) + vz1(ixm, iy, izm) &
              + vz1(ix, iym, izm) + vz1(ixm, iym, izm)) / 4.0_num

          ! xpass/ypass flag whether those directions have already been
          ! remapped this cycle; only un-remapped directions contribute.
          dv = (REAL(xpass, num) * (vxb - vxbm) / dxb(ix) &
              + REAL(ypass, num) * (vyb - vybm) / dyb(iy) &
              + (vzb - vzbm) / dzb(iz)) * dt

          ! control volume before remap
          cv1(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv)

          dv = (REAL(xpass, num) * (vxb - vxbm) / dxb(ix) &
              + REAL(ypass, num) * (vyb - vybm) / dyb(iy)) * dt

          ! control volume after remap
          cv2(ix, iy, iz) = cv(ix, iy, iz) * (1.0_num + dv)

          ! dzb before remap
          dzb1(ix, iy, iz) = dzb(iz) + (vzb - vzbm) * dt
        END DO
      END DO
    END DO

    ! Evans and Hawley (ApJ, vol 332, p650, (1988))
    ! constrained transport remap of magnetic fluxes
    CALL vz_bx_flux

    DO iz = 1, nz
      izm = iz - 1
      DO iy = 1, ny
        DO ix = 0, nx
          bx(ix, iy, iz) = bx(ix, iy, iz) - flux(ix, iy, iz) + flux(ix, iy, izm)
        END DO
      END DO
    END DO

    DO iz = 0, nz
      DO iy = 1, ny
        DO ix = 1, nx
          ixm = ix - 1
          bz(ix, iy, iz) = bz(ix, iy, iz) + flux(ix, iy, iz) - flux(ixm, iy, iz)
        END DO
      END DO
    END DO

    CALL vz_by_flux

    DO iz = 1, nz
      izm = iz - 1
      DO iy = 0, ny
        DO ix = 1, nx
          by(ix, iy, iz) = by(ix, iy, iz) - flux(ix, iy, iz) + flux(ix, iy, izm)
        END DO
      END DO
    END DO

    DO iz = 0, nz
      DO iy = 1, ny
        iym = iy - 1
        DO ix = 1, nx
          bz(ix, iy, iz) = bz(ix, iy, iz) + flux(ix, iy, iz) - flux(ix, iym, iz)
        END DO
      END DO
    END DO

    ! remap of mass + calculation of mass fluxes (dm) needed for later remaps
    CALL z_mass_flux ! calculates dm(0:nx, 0:ny+1)
    CALL dm_z_bcs    ! need dm(0:nx+1, 0:ny+1) for velocity remap

    DO iz = 1, nz
      izm = iz - 1
      DO iy = 1, ny
        DO ix = 1, nx
          rho(ix, iy, iz) = (rho1(ix, iy, iz) * cv1(ix, iy, iz) &
              + dm(ix, iy, izm) - dm(ix, iy, iz)) / cv2(ix, iy, iz)
        END DO
      END DO
    END DO

    ! remap specific energy density using mass coordinates
    CALL z_energy_flux

    DO iz = 1, nz
      izm = iz - 1
      DO iy = 1, ny
        DO ix = 1, nx
          energy(ix, iy, iz) = (energy(ix, iy, iz) * cv1(ix, iy, iz) &
              * rho1(ix, iy, iz) + flux(ix, iy, izm) - flux(ix, iy, iz)) &
              / (cv2(ix, iy, iz) * rho(ix, iy, iz))
        END DO
      END DO
    END DO

    ! redefine dzb1, cv1, cv2, dm and vz1 for velocity (vertex) cells
    ! in some of these calculations the flux variable is used as a
    ! temporary array

    DO iz = -1, nz+1
      izp = iz + 1
      DO iy = 0, ny
        iyp = iy + 1
        DO ix = 0, nx
          ixp = ix + 1
          ! vertex density before remap: CV-weighted average of the eight
          ! surrounding cell densities
          rho_v(ix, iy, iz) = rho1(ix, iy, iz) * cv1(ix, iy, iz) &
              + rho1(ixp, iy , iz ) * cv1(ixp, iy , iz ) &
              + rho1(ix , iyp, iz ) * cv1(ix , iyp, iz ) &
              + rho1(ixp, iyp, iz ) * cv1(ixp, iyp, iz ) &
              + rho1(ix , iy , izp) * cv1(ix , iy , izp) &
              + rho1(ixp, iy , izp) * cv1(ixp, iy , izp) &
              + rho1(ix , iyp, izp) * cv1(ix , iyp, izp) &
              + rho1(ixp, iyp, izp) * cv1(ixp, iyp, izp)

          rho_v(ix, iy, iz) = rho_v(ix, iy, iz) &
              / (cv1(ix, iy , iz ) + cv1(ixp, iy , iz ) &
              + cv1(ix, iyp, iz ) + cv1(ixp, iyp, iz ) &
              + cv1(ix, iy , izp) + cv1(ixp, iy , izp) &
              + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp))
        END DO
      END DO
    END DO

    DO iz = 0, nz
      izp = iz + 1
      DO iy = 0, ny
        iyp = iy + 1
        DO ix = 0, nx
          ixp = ix + 1
          flux(ix, iy, iz) = cv1(ix, iy, iz) + cv1(ixp, iy, iz) &
              + cv1(ix, iyp, iz ) + cv1(ixp, iyp, iz ) &
              + cv1(ix, iy , izp) + cv1(ixp, iy , izp) &
              + cv1(ix, iyp, izp) + cv1(ixp, iyp, izp)
        END DO
      END DO
    END DO
    ! cv1 = vertex CV before remap
    cv1(0:nx, 0:ny, 0:nz) = flux(0:nx, 0:ny, 0:nz) / 8.0_num

    DO iz = 0, nz
      izp = iz + 1
      DO iy = 0, ny
        iyp = iy + 1
        DO ix = 0, nx
          ixp = ix + 1
          flux(ix, iy, iz) = cv2(ix, iy, iz) + cv2(ixp, iy, iz) &
              + cv2(ix, iyp, iz ) + cv2(ixp, iyp, iz ) &
              + cv2(ix, iy , izp) + cv2(ixp, iy , izp) &
              + cv2(ix, iyp, izp) + cv2(ixp, iyp, izp)
        END DO
      END DO
    END DO
    ! cv2 = vertex CV after remap
    cv2(0:nx, 0:ny, 0:nz) = flux(0:nx, 0:ny, 0:nz) / 8.0_num

    DO iz = -2, nz+1
      izp = iz + 1
      DO iy = 0, ny
        DO ix = 0, nx
          flux(ix, iy, iz) = (vz1(ix, iy, iz) + vz1(ix, iy, izp)) / 2.0_num
        END DO
      END DO
    END DO
    ! vertex boundary velocity used in remap
    vz1(0:nx, 0:ny, -2:nz+1) = flux(0:nx, 0:ny, -2:nz+1)

    DO iz = -1, nz+1
      izm = iz - 1
      DO iy = 0, ny
        DO ix = 0, nx
          ! dzb1 = width of vertex CV before remap
          dzb1(ix, iy, iz) = dzc(iz) + (vz1(ix, iy, iz) - vz1(ix, iy, izm)) * dt
        END DO
      END DO
    END DO

    DO iz = -1, nz
      izp = iz + 1
      DO iy = 0, ny
        iyp = iy + 1
        DO ix = 0, nx
          ixp = ix + 1
          flux(ix, iy, iz) = dm(ix, iy, iz) + dm(ixp, iy, iz) &
              + dm(ix, iyp, iz ) + dm(ixp, iyp, iz ) &
              + dm(ix, iy , izp) + dm(ixp, iy , izp) &
              + dm(ix, iyp, izp) + dm(ixp, iyp, izp)
        END DO
      END DO
    END DO
    ! mass flux out of vertex CV
    dm(0:nx, 0:ny, -1:nz) = flux(0:nx, 0:ny, -1:nz) / 8.0_num

    DO iz = 0, nz
      izm = iz - 1
      DO iy = 0, ny
        DO ix = 0, nx
          ! vertex density after remap
          rho_v1(ix, iy, iz) = (rho_v(ix, iy, iz) * cv1(ix, iy, iz) &
              + dm(ix, iy, izm) - dm(ix, iy, iz)) / cv2(ix, iy, iz)
        END DO
      END DO
    END DO

    ! conservative remap of the three momentum components on vertex cells
    CALL z_momy_flux

    DO iz = 0, nz
      izm = iz - 1
      DO iy = 0, ny
        DO ix = 0, nx
          vy(ix, iy, iz) = (rho_v(ix, iy, iz) * vy(ix, iy, iz) &
              * cv1(ix, iy, iz) + flux(ix, iy, izm) - flux(ix, iy, iz)) &
              / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz))
        END DO
      END DO
    END DO

    CALL z_momx_flux

    DO iz = 0, nz
      izm = iz - 1
      DO iy = 0, ny
        DO ix = 0, nx
          vx(ix, iy, iz) = (rho_v(ix, iy, iz) * vx(ix, iy, iz) &
              * cv1(ix, iy, iz) + flux(ix, iy, izm) - flux(ix, iy, iz)) &
              / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz))
        END DO
      END DO
    END DO

    CALL z_momz_flux

    DO iz = 0, nz
      izm = iz - 1
      DO iy = 0, ny
        DO ix = 0, nx
          vz(ix, iy, iz) = (rho_v(ix, iy, iz) * vz(ix, iy, iz) &
              * cv1(ix, iy, iz) + flux(ix, iy, izm) - flux(ix, iy, iz)) &
              / (cv2(ix, iy, iz) * rho_v1(ix, iy, iz))
        END DO
      END DO
    END DO

    CALL boundary_conditions

    DEALLOCATE (rho1, dm, cv2, flux, dzb1, rho_v, rho_v1)
    zpass = 0

  END SUBROUTINE remap_z



  ! Van Leer limited flux of bx through z faces (constrained transport)
  SUBROUTINE vz_bx_flux

    REAL(num) :: v_advect, vad_p, vad_m
    REAL(num) :: db, dbzp, dbzp2, dbzm
    INTEGER :: izp2

    DO iz = 0, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny
        iym = iy - 1
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx
          ixp = ix + 1

          v_advect = (vz1(ix, iy, iz) + vz1(ix, iym, iz)) / 2.0_num

          ! cell widths averaged to the bx face position
          db    = (dzb1(ix, iy, iz  ) + dzb1(ixp, iy, iz  )) / 2.0_num
          dbzp  = (dzb1(ix, iy, izp ) + dzb1(ixp, iy, izp )) / 2.0_num
          dbzp2 = (dzb1(ix, iy, izp2) + dzb1(ixp, iy, izp2)) / 2.0_num
          dbzm  = (dzb1(ix, iy, izm ) + dzb1(ixp, iy, izm )) / 2.0_num

          w4 = bx(ix, iy, iz ) / db
          w5 = bx(ix, iy, izp) / dbzp

          ! first order upwind donor-cell flux
          flux(ix, iy, iz) = (MAX(0.0_num, v_advect) * w4 &
              + MIN(0.0_num, v_advect) * w5) * dt

          w1 = bx(ix, iy, izp ) / dbzp  - bx(ix, iy, iz ) / db
          w2 = bx(ix, iy, iz  ) / db    - bx(ix, iy, izm) / dbzm
          w3 = bx(ix, iy, izp2) / dbzp2 - bx(ix, iy, izp) / dbzp

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          w5 = ABS(v_advect) * dt / (db * vad_p + dbzp * vad_m)

          w4 = (2.0_num - w5) * ABS(w1) / dzc(iz) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzc(izm) * vad_p + dzc(izp) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w6 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzb(iz) * vad_p + dzb(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux(ix, iy, iz) = flux(ix, iy, iz) &
              + v_advect * dt * w6 * (1.0_num - w5)
        END DO
      END DO
    END DO

  END SUBROUTINE vz_bx_flux



  ! Van Leer limited flux of by through z faces (constrained transport)
  SUBROUTINE vz_by_flux

    REAL(num) :: v_advect, vad_p, vad_m
    REAL(num) :: db, dbzp, dbzp2, dbzm
    INTEGER :: izp2

    DO iz = 0, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny
        iyp = iy + 1
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx
          ixm = ix - 1

          v_advect = (vz1(ix, iy, iz) + vz1(ixm, iy, iz)) / 2.0_num

          ! cell widths averaged to the by face position
          db    = (dzb1(ix, iy, iz  ) + dzb1(ix, iyp, iz  )) / 2.0_num
          dbzp  = (dzb1(ix, iy, izp ) + dzb1(ix, iyp, izp )) / 2.0_num
          dbzp2 = (dzb1(ix, iy, izp2) + dzb1(ix, iyp, izp2)) / 2.0_num
          dbzm  = (dzb1(ix, iy, izm ) + dzb1(ix, iyp, izm )) / 2.0_num

          w4 = by(ix, iy, iz ) / db
          w5 = by(ix, iy, izp) / dbzp

          ! first order upwind donor-cell flux
          flux(ix, iy, iz) = (MAX(0.0_num, v_advect) * w4 &
              + MIN(0.0_num, v_advect) * w5) * dt

          w1 = by(ix, iy, izp ) / dbzp  - by(ix, iy, iz ) / db
          w2 = by(ix, iy, iz  ) / db    - by(ix, iy, izm) / dbzm
          w3 = by(ix, iy, izp2) / dbzp2 - by(ix, iy, izp) / dbzp

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          ! BUGFIX: was "dbzp + vad_m"; the upwind width must be selected
          ! with "dbzp * vad_m", exactly as in vz_bx_flux and the other
          ! remap routines
          w5 = ABS(v_advect) * dt / (db * vad_p + dbzp * vad_m)

          w4 = (2.0_num - w5) * ABS(w1) / dzc(iz) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzc(izm) * vad_p + dzc(izp) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w6 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzb(iz) * vad_p + dzb(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux(ix, iy, iz) = flux(ix, iy, iz) &
              + v_advect * dt * w6 * (1.0_num - w5)
        END DO
      END DO
    END DO

  END SUBROUTINE vz_by_flux



  ! mass flux (dm) through z faces, Van Leer limited
  SUBROUTINE z_mass_flux

    REAL(num) :: v_advect, flux_rho, vad_p, vad_m
    INTEGER :: izp2

    DO iz = 0, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny+1
        iym = iy - 1
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx+1
          ixm = ix - 1

          ! face-centred advection velocity
          v_advect = (vz1(ix, iy, iz) + vz1(ix, iym, iz) &
              + vz1(ixm, iy, iz) + vz1(ixm, iym, iz)) / 4.0_num

          ! first order upwind donor-cell flux
          dm(ix, iy, iz) = (MAX(0.0_num, v_advect) * rho(ix, iy, iz) &
              + MIN(0.0_num, v_advect) * rho(ix, iy, izp)) * dt

          w1 = rho(ix, iy, izp ) - rho(ix, iy, iz )
          w2 = rho(ix, iy, iz  ) - rho(ix, iy, izm)
          w3 = rho(ix, iy, izp2) - rho(ix, iy, izp)

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          w5 = ABS(v_advect) * dt &
              / (dzb1(ix, iy, iz) * vad_p + dzb1(ix, iy, izp) * vad_m)

          w4 = (2.0_num - w5) * ABS(w1) / dzc(iz) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzc(izm) * vad_p + dzc(izp) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w6 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzb(iz) * vad_p + dzb(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux_rho = v_advect * dt * w6 * (1.0_num - w5)

          ! convert linear flux to a mass by the transverse face area
          dm(ix, iy, iz) = (flux_rho + dm(ix, iy, iz)) * dxb(ix) * dyb(iy)
        END DO
      END DO
    END DO

  END SUBROUTINE z_mass_flux



  SUBROUTINE z_energy_flux ! energy remap in mass coordinates

    REAL(num) :: v_advect, vad_p, vad_m
    INTEGER :: izp2

    DO iz = 0, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny
        iym = iy - 1
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx
          ixm = ix - 1

          v_advect = (vz1(ix, iy, iz) + vz1(ix, iym, iz) &
              + vz1(ixm, iy, iz) + vz1(ixm, iym, iz)) / 4.0_num

          w1 = energy(ix, iy, izp ) - energy(ix, iy, iz )
          w2 = energy(ix, iy, iz  ) - energy(ix, iy, izm)
          w3 = energy(ix, iy, izp2) - energy(ix, iy, izp)

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          w5 = ABS(v_advect) * dt &
              / (dzb1(ix, iy, iz) * vad_p + dzb1(ix, iy, izp) * vad_m)

          ! w7 = upwind donor value, w6 = mass fraction removed from donor
          w7 = energy(ix, iy, iz) * vad_p + energy(ix, iy, izp) * vad_m
          w6 = ABS(dm(ix, iy, iz)) / dxb(ix) / dyb(iy) &
              / (rho1(ix, iy, iz ) * dzb1(ix, iy, iz ) * vad_p &
              + rho1(ix, iy, izp) * dzb1(ix, iy, izp) * vad_m)

          w4 = (2.0_num - w5) * ABS(w1) / dzc(iz) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzc(izm) * vad_p + dzc(izp) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w5 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzb(iz) * vad_p + dzb(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux(ix, iy, iz) = dm(ix, iy, iz) * (w7 + w5 * (1.0_num - w6))
        END DO
      END DO
    END DO

  END SUBROUTINE z_energy_flux



  SUBROUTINE z_momy_flux ! vy momentum remap in mass coordinates

    REAL(num) :: v_advect, m, mp, ai, aip, dk
    REAL(num) :: vad_p, vad_m
    INTEGER :: izp2

    DO iz = -1, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx

          v_advect = vz1(ix, iy, iz)

          w1 = vy(ix, iy, izp ) - vy(ix, iy, iz )
          w2 = vy(ix, iy, iz  ) - vy(ix, iy, izm)
          w3 = vy(ix, iy, izp2) - vy(ix, iy, izp)

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          w5 = ABS(v_advect) * dt &
              / (dzb1(ix, iy, iz) * vad_p + dzb1(ix, iy, izp) * vad_m)

          ! w7 = upwind donor value, w6 = mass fraction removed from donor
          w7 = vy(ix, iy, iz) * vad_p + vy(ix, iy, izp) * vad_m
          w6 = ABS(dm(ix, iy, iz)) / dyc(iy) / dxc(ix) &
              / (rho_v(ix, iy, iz ) * dzb1(ix, iy, iz ) * vad_p &
              + rho_v(ix, iy, izp) * dzb1(ix, iy, izp) * vad_m)

          ! vertex-centred grid: widths use dzb/dzc roles swapped relative
          ! to the cell-centred routines
          w4 = (2.0_num - w5) * ABS(w1) / dzb(izp) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzb(iz) * vad_p + dzb(izp2) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w5 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzc(iz) * vad_p + dzc(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6)
        END DO
      END DO
    END DO

    ! optionally accumulate kinetic energy lost by the remap so it can
    ! be returned as heat elsewhere
    IF (rke) THEN
      DO iz = 0, nz-1
        izm = iz - 1
        izp = iz + 1
        DO iy = 0, ny
          iyp = iy + 1
          !DEC$ IVDEP
          !DEC$ VECTOR ALWAYS
          DO ix = 0, nx
            ixp = ix + 1

            m  = rho_v1(ix, iy, iz ) * cv2(ix, iy, iz )
            mp = rho_v1(ix, iy, izp) * cv2(ix, iy, izp)

            ai = (vy(ix, iy, iz) - flux(ix, iy, izm)) * dm(ix, iy, izm) / m &
                + (flux(ix, iy, iz) - vy(ix, iy, iz)) * dm(ix, iy, iz) / m

            aip = (vy(ix, iy, izp) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp &
                + (flux(ix, iy, izp) - vy(ix, iy, izp)) * dm(ix, iy, izp) / mp

            dk = (vy(ix, iy, izp) - vy(ix, iy, iz)) * (flux(ix, iy, iz) &
                - 0.5_num * (vy(ix, iy, izp) + vy(ix, iy, iz))) &
                + 0.5_num * ai * (flux(ix, iy, iz) - vy(ix, iy, iz)) &
                + 0.5_num * aip * (vy(ix, iy, izp) - flux(ix, iy, iz))

            dk = dk * dm(ix, iy, iz) / 4.0_num
            delta_ke(ix , iy , izp) = delta_ke(ix , iy , izp) + dk
            delta_ke(ixp, iy , izp) = delta_ke(ixp, iy , izp) + dk
            delta_ke(ix , iyp, izp) = delta_ke(ix , iyp, izp) + dk
            delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk
          END DO
        END DO
      END DO
    END IF

    ! convert face value into a momentum flux
    flux(0:nx, 0:ny, -1:nz) = flux(0:nx, 0:ny, -1:nz) * dm(0:nx, 0:ny, -1:nz)

  END SUBROUTINE z_momy_flux



  SUBROUTINE z_momx_flux ! vx momentum remap in mass coordinates

    REAL(num) :: v_advect, m, mp, ai, aip, dk
    REAL(num) :: vad_p, vad_m
    INTEGER :: izp2

    DO iz = -1, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx

          v_advect = vz1(ix, iy, iz)

          w1 = vx(ix, iy, izp ) - vx(ix, iy, iz )
          w2 = vx(ix, iy, iz  ) - vx(ix, iy, izm)
          w3 = vx(ix, iy, izp2) - vx(ix, iy, izp)

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          w5 = ABS(v_advect) * dt &
              / (dzb1(ix, iy, iz) * vad_p + dzb1(ix, iy, izp) * vad_m)

          ! w7 = upwind donor value, w6 = mass fraction removed from donor
          w7 = vx(ix, iy, iz) * vad_p + vx(ix, iy, izp) * vad_m
          w6 = ABS(dm(ix, iy, iz)) / dyc(iy) / dxc(ix) &
              / (rho_v(ix, iy, iz ) * dzb1(ix, iy, iz ) * vad_p &
              + rho_v(ix, iy, izp) * dzb1(ix, iy, izp) * vad_m)

          w4 = (2.0_num - w5) * ABS(w1) / dzb(izp) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzb(iz) * vad_p + dzb(izp2) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w5 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzc(iz) * vad_p + dzc(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6)
        END DO
      END DO
    END DO

    ! optionally accumulate kinetic energy lost by the remap
    IF (rke) THEN
      DO iz = 0, nz-1
        izm = iz - 1
        izp = iz + 1
        DO iy = 0, ny
          iyp = iy + 1
          !DEC$ IVDEP
          !DEC$ VECTOR ALWAYS
          DO ix = 0, nx
            ixp = ix + 1

            m  = rho_v1(ix, iy, iz ) * cv2(ix, iy, iz )
            mp = rho_v1(ix, iy, izp) * cv2(ix, iy, izp)

            ai = (vx(ix, iy, iz) - flux(ix, iy, izm)) * dm(ix, iy, izm) / m &
                + (flux(ix, iy, iz) - vx(ix, iy, iz)) * dm(ix, iy, iz) / m

            aip = (vx(ix, iy, izp) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp &
                + (flux(ix, iy, izp) - vx(ix, iy, izp)) * dm(ix, iy, izp) / mp

            dk = (vx(ix, iy, izp) - vx(ix, iy, iz)) * (flux(ix, iy, iz) &
                - 0.5_num * (vx(ix, iy, izp) + vx(ix, iy, iz))) &
                + 0.5_num * ai * (flux(ix, iy, iz) - vx(ix, iy, iz)) &
                + 0.5_num * aip * (vx(ix, iy, izp) - flux(ix, iy, iz))

            dk = dk * dm(ix, iy, iz) / 4.0_num
            delta_ke(ix , iy , izp) = delta_ke(ix , iy , izp) + dk
            delta_ke(ixp, iy , izp) = delta_ke(ixp, iy , izp) + dk
            delta_ke(ix , iyp, izp) = delta_ke(ix , iyp, izp) + dk
            delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk
          END DO
        END DO
      END DO
    END IF

    ! convert face value into a momentum flux
    flux(0:nx, 0:ny, -1:nz) = flux(0:nx, 0:ny, -1:nz) * dm(0:nx, 0:ny, -1:nz)

  END SUBROUTINE z_momx_flux



  SUBROUTINE z_momz_flux ! vz momentum remap in mass coordinates

    REAL(num) :: v_advect, m, mp, ai, aip, dk
    REAL(num) :: vad_p, vad_m
    INTEGER :: izp2

    DO iz = -1, nz
      izm = iz - 1
      izp = iz + 1
      izp2 = iz + 2
      DO iy = 0, ny
        !DEC$ IVDEP
        !DEC$ VECTOR ALWAYS
        DO ix = 0, nx

          v_advect = vz1(ix, iy, iz)

          w1 = vz(ix, iy, izp ) - vz(ix, iy, iz )
          w2 = vz(ix, iy, iz  ) - vz(ix, iy, izm)
          w3 = vz(ix, iy, izp2) - vz(ix, iy, izp)

          ! vad_p and vad_m are logical switches which determine v_advect>=0
          ! and v_advect<0 respectively. It's written this way to allow vector
          ! optimization
          vad_p = -MIN(SIGN(1.0_num, -v_advect), 0.0_num)
          vad_m = MAX(SIGN(1.0_num, -v_advect), 0.0_num)

          w5 = ABS(v_advect) * dt &
              / (dzb1(ix, iy, iz) * vad_p + dzb1(ix, iy, izp) * vad_m)

          ! w7 = upwind donor value, w6 = mass fraction removed from donor
          w7 = vz(ix, iy, iz) * vad_p + vz(ix, iy, izp) * vad_m
          w6 = ABS(dm(ix, iy, iz)) / dyc(iy) / dxc(ix) &
              / (rho_v(ix, iy, iz ) * dzb1(ix, iy, iz ) * vad_p &
              + rho_v(ix, iy, izp) * dzb1(ix, iy, izp) * vad_m)

          w4 = (2.0_num - w5) * ABS(w1) / dzb(izp) &
              + (1.0_num + w5) * ABS(w2 * vad_p + w3 * vad_m) &
              / (dzb(iz) * vad_p + dzb(izp2) * vad_m)

          w4 = w4 / 6.0_num
          w8 = 0.5_num * (SIGN(1.0_num, w1) &
              + SIGN(1.0_num, w2 * vad_p + w3 * vad_m))

          ! Van Leer monotonicity limiter
          w5 = SIGN(1.0_num, v_advect) * w8 &
              * MIN(ABS(w4) * (dzc(iz) * vad_p + dzc(izp) * vad_m), &
              ABS(w1), ABS(w2 * vad_p + w3 * vad_m))

          flux(ix, iy, iz) = w7 + w5 * (1.0_num - w6)
        END DO
      END DO
    END DO

    ! optionally accumulate kinetic energy lost by the remap
    IF (rke) THEN
      DO iz = 0, nz-1
        izm = iz - 1
        izp = iz + 1
        DO iy = 0, ny
          iyp = iy + 1
          !DEC$ IVDEP
          !DEC$ VECTOR ALWAYS
          DO ix = 0, nx
            ixp = ix + 1

            m  = rho_v1(ix, iy, iz ) * cv2(ix, iy, iz )
            mp = rho_v1(ix, iy, izp) * cv2(ix, iy, izp)

            ai = (vz(ix, iy, iz) - flux(ix, iy, izm)) * dm(ix, iy, izm) / m &
                + (flux(ix, iy, iz) - vz(ix, iy, iz)) * dm(ix, iy, iz) / m

            aip = (vz(ix, iy, izp) - flux(ix, iy, iz)) * dm(ix, iy, iz) / mp &
                + (flux(ix, iy, izp) - vz(ix, iy, izp)) * dm(ix, iy, izp) / mp

            dk = (vz(ix, iy, izp) - vz(ix, iy, iz)) * (flux(ix, iy, iz) &
                - 0.5_num * (vz(ix, iy, izp) + vz(ix, iy, iz))) &
                + 0.5_num * ai * (flux(ix, iy, iz) - vz(ix, iy, iz)) &
                + 0.5_num * aip * (vz(ix, iy, izp) - flux(ix, iy, iz))

            dk = dk * dm(ix, iy, iz) / 4.0_num
            delta_ke(ix , iy , izp) = delta_ke(ix , iy , izp) + dk
            delta_ke(ixp, iy , izp) = delta_ke(ixp, iy , izp) + dk
            delta_ke(ix , iyp, izp) = delta_ke(ix , iyp, izp) + dk
            delta_ke(ixp, iyp, izp) = delta_ke(ixp, iyp, izp) + dk
          END DO
        END DO
      END DO
    END IF

    ! convert face value into a momentum flux
    flux(0:nx, 0:ny, -1:nz) = flux(0:nx, 0:ny, -1:nz) * dm(0:nx, 0:ny, -1:nz)

  END SUBROUTINE z_momz_flux



  ! fill z guard cells of dm from MPI neighbours; where there is no
  ! neighbour (MPI_PROC_NULL) copy the adjacent interior value instead
  SUBROUTINE dm_z_bcs

    CALL MPI_SENDRECV(dm(0:nx+1, 0:ny+1, 1), (nx+2)*(ny+2), mpireal, &
        front, tag, dm(0:nx+1, 0:ny+1, nz+1), (nx+2)*(ny+2), mpireal, &
        back, tag, comm, status, errcode)
    IF (back == MPI_PROC_NULL) &
        dm(0:nx+1, 0:ny+1, nz+1) = dm(0:nx+1, 0:ny+1, nz)

    CALL MPI_SENDRECV(dm(0:nx+1, 0:ny+1, nz-1), (nx+2)*(ny+2), mpireal, &
        back, tag, dm(0:nx+1, 0:ny+1, -1), (nx+2)*(ny+2), mpireal, &
        front, tag, comm, status, errcode)
    IF (front == MPI_PROC_NULL) &
        dm(0:nx+1, 0:ny+1, -1) = dm(0:nx+1, 0:ny+1, 0)

  END SUBROUTINE dm_z_bcs

END MODULE zremap