Merge branch 'lagrangian' of github.com:dgvacarevelo/MFC into lagrangian
Diego Vaca committed Dec 21, 2024
2 parents af9371e + 3c20449 commit 12de935
Showing 9 changed files with 35 additions and 36 deletions.
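
The theme of this merge: every hardcoded MPI_DOUBLE_PRECISION handle becomes mpi_p, a datatype parameter that tracks the working-precision kind wp of the buffers, and the lint rule is extended to also reject the literal 'double_precision' while excluding the precision-select source that must name both. A minimal sketch of how such a module can pair wp with mpi_p (module name and fypp guard are assumptions for illustration, not MFC's actual source):

! Hypothetical sketch of a precision-select module (names assumed,
! not MFC's actual source): one fypp guard picks both the real kind
! and the matching MPI datatype.
module m_precision_demo
    use mpi
    implicit none
#:if defined('MFC_SINGLE_PRECISION')
    integer, parameter :: wp = selected_real_kind(6, 37)      ! single precision
    integer, parameter :: mpi_p = MPI_REAL                    ! MPI type matching wp
#:else
    integer, parameter :: wp = selected_real_kind(15, 307)    ! double precision
    integer, parameter :: mpi_p = MPI_DOUBLE_PRECISION        ! MPI type matching wp
#:endif
end module m_precision_demo

Under either build, a call such as call MPI_BCAST(dt, 1, mpi_p, 0, MPI_COMM_WORLD, ierr) then matches its buffer's kind, which is exactly the substitution each hunk below makes.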
2 changes: 1 addition & 1 deletion .github/workflows/lint-source.yml
@@ -36,5 +36,5 @@ jobs:

     - name: No double precision intrinsics
       run: |
-        ! grep -iR 'dsqrt\|dexp\|dlog\|dble\|dabs\|double\ precision\|real(8)\|real(4)\|dprod\|dmin\|dmax\|dfloat\|dreal\|dcos\|dsin\|dtan\|dsign\|dtanh\|dsinh\|dcosh\|d0' --exclude-dir=syscheck --exclude="*nvtx*" ./src/*
+        ! grep -iR 'double_precision\|dsqrt\|dexp\|dlog\|dble\|dabs\|double\ precision\|real(8)\|real(4)\|dprod\|dmin\|dmax\|dfloat\|dreal\|dcos\|dsin\|dtan\|dsign\|dtanh\|dsinh\|dcosh\|d0' --exclude-dir=syscheck --exclude="*nvtx*" --exclude="*precision_select*" ./src/*
2 changes: 1 addition & 1 deletion src/common/m_mpi_common.fpp
@@ -261,7 +261,7 @@ contains
         end do
         allocate (gathered_vector(sum(recounts)))
-        call MPI_GATHERV(my_vector, counts, MPI_DOUBLE_PRECISION, gathered_vector, recounts, displs, MPI_DOUBLE_PRECISION, &
+        call MPI_GATHERV(my_vector, counts, mpi_p, gathered_vector, recounts, displs, mpi_p, &
                          root, MPI_COMM_WORLD, ierr)
 #endif
     end subroutine s_mpi_gather_data
4 changes: 2 additions & 2 deletions src/post_process/m_data_input.f90
@@ -516,10 +516,10 @@ subroutine s_read_parallel_data_files(t_step)
             ! Initial displacement to skip at beginning of file
             disp = m_MOK*max(MOK, n_MOK)*max(MOK, p_MOK)*WP_MOK*(var_MOK - 1)

-            call MPI_FILE_SET_VIEW(ifile, disp, MPI_DOUBLE_PRECISION, MPI_IO_DATA%view(sys_size + 1), &
+            call MPI_FILE_SET_VIEW(ifile, disp, mpi_p, MPI_IO_DATA%view(sys_size + 1), &
                                    'native', mpi_info_int, ierr)
             call MPI_FILE_READ(ifile, MPI_IO_DATA%var(sys_size + 1)%sf, data_size, &
-                               MPI_DOUBLE_PRECISION, status, ierr)
+                               mpi_p, status, ierr)
         end if

         call s_mpi_barrier()
8 changes: 4 additions & 4 deletions src/post_process/m_data_output.fpp
@@ -1076,7 +1076,7 @@ contains
             end if
             call MPI_BCAST(tot_data, 1, MPI_integer, 0, MPI_COMM_WORLD, ierr)
-            call MPI_BCAST(time_real, 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+            call MPI_BCAST(time_real, 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
             gsizes(1) = tot_data
             gsizes(2) = 21
@@ -1086,7 +1086,7 @@ contains
             start_idx_part(2) = 0
             call MPI_TYPE_CREATE_SUBARRAY(2, gsizes, lsizes, start_idx_part, &
-                                          MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, view, ierr)
+                                          MPI_ORDER_FORTRAN, mpi_p, view, ierr)
             call MPI_TYPE_COMMIT(view, ierr)
             write (file_loc, '(A,I0,A)') 'lag_bubbles_', t_step, '.dat'
@@ -1099,13 +1099,13 @@ contains
                                mpi_info_int, ifile, ierr)
             disp = 0._wp
-            call MPI_FILE_SET_VIEW(ifile, disp, MPI_DOUBLE_PRECISION, view, &
+            call MPI_FILE_SET_VIEW(ifile, disp, mpi_p, view, &
                                    'native', mpi_info_null, ierr)
             allocate (MPI_IO_DATA_lg_bubbles(tot_data, 1:21))
             call MPI_FILE_READ_ALL(ifile, MPI_IO_DATA_lg_bubbles, 21*tot_data, &
-                                   MPI_DOUBLE_PRECISION, status, ierr)
+                                   mpi_p, status, ierr)
             write (file_loc, '(A,I0,A)') 'lag_bubbles_post_process_', t_step, '.dat'
             file_loc = trim(case_dir)//'/lag_bubbles_post_process/'//trim(file_loc)
6 changes: 3 additions & 3 deletions src/pre_process/m_mpi_proxy.fpp
@@ -91,7 +91,7 @@ contains
             #:endfor
             #:for VAR in [ '2', '3', '4', '5', '6', '7', '8', '9']
-                call MPI_BCAST(patch_icpp(i)%a(${VAR}$), 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+                call MPI_BCAST(patch_icpp(i)%a(${VAR}$), 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
             #:endfor
             call MPI_BCAST(patch_icpp(i)%model_filepath, len(patch_icpp(i)%model_filepath), MPI_CHARACTER, 0, MPI_COMM_WORLD, ierr)
@@ -106,7 +106,7 @@ contains
             ! Broadcast IB variables
             call MPI_BCAST(patch_ib(i)%geometry, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
             call MPI_BCAST(patch_ib(i)%model_filepath, len(patch_ib(i)%model_filepath), MPI_CHARACTER, 0, MPI_COMM_WORLD, ierr)
-            call MPI_BCAST(patch_ib(i)%model_threshold, 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+            call MPI_BCAST(patch_ib(i)%model_threshold, 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
             call MPI_BCAST(patch_ib(i)%model_spc, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
             #:for VAR in [ 'x_centroid', 'y_centroid', 'z_centroid', &
@@ -115,7 +115,7 @@ contains
             #:endfor
             #:for VAR in [ 'model_translate', 'model_scale', 'model_rotate']
-                call MPI_BCAST(patch_ib(i)%${VAR}$, size(patch_ib(i)%${VAR}$), MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+                call MPI_BCAST(patch_ib(i)%${VAR}$, size(patch_ib(i)%${VAR}$), mpi_p, 0, MPI_COMM_WORLD, ierr)
             #:endfor
         end do
25 changes: 12 additions & 13 deletions src/simulation/m_bubbles_EE.fpp
@@ -427,7 +427,7 @@ contains
         real(wp), intent(out) :: h

         real(wp) :: h0, h1 !< Time step size
-        real(wp) :: d0, d1, d2 !< norms
+        real(wp) :: d_0, d_1, d_2 !< norms
         real(wp), dimension(2) :: myR_tmp, myV_tmp, myA_tmp !< Bubble radius, radial velocity, and radial acceleration

         ! Determine the starting time step
@@ -438,13 +438,13 @@ contains
                                 fpb, fpbdot, alf, fntait, fBtait, &
                                 f_bub_adv_src, f_divu)

-        ! Compute d0 = ||y0|| and d1 = ||f(x0,y0)||
-        d0 = sqrt((myR_tmp(1)**2._wp + myV_tmp(1)**2._wp)/2._wp)
-        d1 = sqrt((myV_tmp(1)**2._wp + myA_tmp(1)**2._wp)/2._wp)
-        if (d0 < 1e-5_wp .or. d1 < 1e-5_wp) then
+        ! Compute d_0 = ||y0|| and d_1 = ||f(x0,y0)||
+        d_0 = sqrt((myR_tmp(1)**2._wp + myV_tmp(1)**2._wp)/2._wp)
+        d_1 = sqrt((myV_tmp(1)**2._wp + myA_tmp(1)**2._wp)/2._wp)
+        if (d_0 < 1e-5_wp .or. d_1 < 1e-5_wp) then
             h0 = 1e-6_wp
         else
-            h0 = 1e-2_wp*(d0/d1)
+            h0 = 1e-2_wp*(d_0/d_1)
         end if

         ! Evaluate f(x0+h0,y0+h0*f(x0,y0))
@@ -454,18 +454,17 @@ contains
                                 fpb, fpbdot, alf, fntait, fBtait, &
                                 f_bub_adv_src, f_divu)

-        ! Compute d2 = ||f(x0+h0,y0+h0*f(x0,y0))-f(x0,y0)||/h0
-        d2 = sqrt(((myV_tmp(2) - myV_tmp(1))**2._wp + (myA_tmp(2) - myA_tmp(1))**2._wp)/2._wp)/h0
+        ! Compute d_2 = ||f(x0+h0,y0+h0*f(x0,y0))-f(x0,y0)||/h0
+        d_2 = sqrt(((myV_tmp(2) - myV_tmp(1))**2._wp + (myA_tmp(2) - myA_tmp(1))**2._wp)/2._wp)/h0

-        ! Set h1 = (0.01/max(d1,d2))^{1/(p+1)}
-        ! if max(d1,d2) < 1e-15_wp, h1 = max(1e-6_wp, h0*1e-3_wp)
-        if (max(d1, d2) < 1e-15_wp) then
+        ! Set h1 = (0.01/max(d_1,d_2))^{1/(p+1)}
+        ! if max(d_1,d_2) < 1e-15_wp, h1 = max(1e-6_wp, h0*1e-3_wp)
+        if (max(d_1, d_2) < 1e-15_wp) then
             h1 = max(1e-6_wp, h0*1e-3_wp)
         else
-            h1 = (1e-2_wp/max(d1, d2))**(1._wp/3._wp)
+            h1 = (1e-2_wp/max(d_1, d_2))**(1._wp/3._wp)
         end if

         ! Set h = min(100*h0,h1)
         h = min(100._wp*h0, h1)

     end subroutine s_initialize_adap_dt
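
For context, s_initialize_adap_dt follows the standard starting-step-size heuristic for ODE integrators (as in Hairer, Nørsett & Wanner): d_0 and d_1 are norms of the state (R, V) and its derivative (V, A), giving a first guess h0 = 0.01*(d_0/d_1); d_2 estimates the second derivative from an explicit Euler probe, and h1 = (0.01/max(d_1, d_2))^(1/3) matches a second-order method (p + 1 = 3); the final h = min(100*h0, h1) guards against an overly optimistic first guess. The d0 -> d_0 renames are cosmetic, likely so the variable no longer trips the lint rule above, whose case-insensitive 'd0' pattern would otherwise flag it as a double-precision literal suffix.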
16 changes: 8 additions & 8 deletions src/simulation/m_bubbles_EL.fpp
@@ -439,8 +439,8 @@ contains
         end if

         call MPI_BCAST(tot_data, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
-        call MPI_BCAST(mytime, 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
-        call MPI_BCAST(dt, 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+        call MPI_BCAST(mytime, 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
+        call MPI_BCAST(dt, 1, mpi_p, 0, MPI_COMM_WORLD, ierr)

         gsizes(1) = tot_data
         gsizes(2) = 21
@@ -450,7 +450,7 @@ contains
         start_idx_part(2) = 0

         call MPI_type_CREATE_SUBARRAY(2, gsizes, lsizes, start_idx_part, &
-                                      MPI_ORDER_FORTRAN, MPI_doUBLE_PRECISION, view, ierr)
+                                      MPI_ORDER_FORTRAN, mpi_p, view, ierr)
         call MPI_type_COMMIT(view, ierr)

         ! Open the file to write all flow variables
@@ -462,11 +462,11 @@ contains
         call MPI_FILE_open(MPI_COMM_WORLD, file_loc, MPI_MODE_RDONLY, &
                            mpi_info_int, ifile, ierr)
         disp = 0._wp
-        call MPI_FILE_SET_VIEW(ifile, disp, MPI_doUBLE_PRECISION, view, &
+        call MPI_FILE_SET_VIEW(ifile, disp, mpi_p, view, &
                                'native', mpi_info_null, ierr)
         allocate (MPI_IO_DATA_lag_bubbles(tot_data, 1:21))
         call MPI_FILE_read_ALL(ifile, MPI_IO_DATA_lag_bubbles, 21*tot_data, &
-                               MPI_doUBLE_PRECISION, status, ierr)
+                               mpi_p, status, ierr)
         do i = 1, tot_data
             id = int(MPI_IO_DATA_lag_bubbles(i, 1))
             inputvals(1:20) = MPI_IO_DATA_lag_bubbles(i, 2:21)
@@ -1776,7 +1776,7 @@ contains
         end if

         call MPI_type_CREATE_SUBARRAY(2, gsizes, lsizes, start_idx_part, &
-                                      MPI_ORDER_FORTRAN, MPI_doUBLE_PRECISION, view, ierr)
+                                      MPI_ORDER_FORTRAN, mpi_p, view, ierr)
         call MPI_type_COMMIT(view, ierr)

         allocate (MPI_IO_DATA_lag_bubbles(1:max(1, bub_id), 1:21))
@@ -1794,7 +1794,7 @@ contains

         disp = 0._wp

-        call MPI_FILE_SET_VIEW(ifile, disp, MPI_doUBLE_PRECISION, view, &
+        call MPI_FILE_SET_VIEW(ifile, disp, mpi_p, view, &
                                'native', mpi_info_null, ierr)

         ! Cycle through list
@@ -1833,7 +1833,7 @@ contains
         end if

         call MPI_FILE_write_ALL(ifile, MPI_IO_DATA_lag_bubbles, 21*max(1, bub_id), &
-                                MPI_doUBLE_PRECISION, status, ierr)
+                                mpi_p, status, ierr)

         call MPI_FILE_CLOSE(ifile, ierr)
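
The lag_bubbles I/O above follows a common MPI-IO pattern: describe each rank's slab of a global (N, 21) table with MPI_TYPE_CREATE_SUBARRAY, set that type as the file view, then read or write collectively. A self-contained sketch of the same pattern (file name, sizes, and the fixed double-precision mpi_p here are illustrative, not MFC's):

! Sketch: each rank owns a 4-row slab of a global (4*nranks, 21) table
! and writes it collectively into one shared file.
program subarray_view_demo
    use mpi
    implicit none
    integer, parameter :: wp = selected_real_kind(15, 307)   ! assumed double build
    integer, parameter :: mpi_p = MPI_DOUBLE_PRECISION       ! MPI type matching wp
    integer :: ierr, rank, nranks, view, ifile
    integer :: gsizes(2), lsizes(2), starts(2), status(MPI_STATUS_SIZE)
    integer(kind=MPI_OFFSET_KIND) :: disp
    real(wp), allocatable :: slab(:, :)

    call MPI_INIT(ierr)
    call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
    call MPI_COMM_SIZE(MPI_COMM_WORLD, nranks, ierr)

    gsizes = [4*nranks, 21]   ! global table, rows distributed over ranks
    lsizes = [4, 21]          ! this rank's slab
    starts = [4*rank, 0]      ! zero-based offset of the slab in the global table
    allocate (slab(lsizes(1), lsizes(2)))
    slab = real(rank, wp)

    ! Describe the slab's position in the file and set it as this rank's view.
    call MPI_TYPE_CREATE_SUBARRAY(2, gsizes, lsizes, starts, &
                                  MPI_ORDER_FORTRAN, mpi_p, view, ierr)
    call MPI_TYPE_COMMIT(view, ierr)
    call MPI_FILE_OPEN(MPI_COMM_WORLD, 'slab_demo.dat', &
                       MPI_MODE_WRONLY + MPI_MODE_CREATE, MPI_INFO_NULL, ifile, ierr)
    disp = 0_MPI_OFFSET_KIND
    call MPI_FILE_SET_VIEW(ifile, disp, mpi_p, view, 'native', MPI_INFO_NULL, ierr)

    ! Collective write: every rank's slab lands in its place in one file.
    call MPI_FILE_WRITE_ALL(ifile, slab, product(lsizes), mpi_p, status, ierr)

    call MPI_FILE_CLOSE(ifile, ierr)
    call MPI_TYPE_FREE(view, ierr)
    call MPI_FINALIZE(ierr)
end program subarray_view_demo

Because mpi_p is both the element type of the view and the datatype of the buffer, switching the build's working precision changes the file layout and the transfer consistently in one place.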
4 changes: 2 additions & 2 deletions src/simulation/m_data_output.fpp
@@ -972,10 +972,10 @@ contains
             ! Initial displacement to skip at beginning of file
             disp = m_MOK*max(MOK, n_MOK)*max(MOK, p_MOK)*WP_MOK*(var_MOK - 1)

-            call MPI_FILE_SET_VIEW(ifile, disp, MPI_DOUBLE_PRECISION, MPI_IO_DATA%view(sys_size + 1), &
+            call MPI_FILE_SET_VIEW(ifile, disp, mpi_p, MPI_IO_DATA%view(sys_size + 1), &
                                    'native', mpi_info_int, ierr)
             call MPI_FILE_WRITE_ALL(ifile, MPI_IO_DATA%var(sys_size + 1)%sf, data_size, &
-                                    MPI_DOUBLE_PRECISION, status, ierr)
+                                    mpi_p, status, ierr)
         end if

         call MPI_FILE_CLOSE(ifile, ierr)
4 changes: 2 additions & 2 deletions src/simulation/m_mpi_proxy.fpp
@@ -242,7 +242,7 @@ contains
         do i = 1, 3
             #:for VAR in [ 'bc_x%vel_in', 'bc_x%vel_out', 'bc_y%vel_in', 'bc_y%vel_out', &
                 & 'bc_z%vel_in', 'bc_z%vel_out']
-                call MPI_BCAST(${VAR}$ (i), 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+                call MPI_BCAST(${VAR}$ (i), 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
             #:endfor
         end do
@@ -266,7 +266,7 @@ contains
         do i = 1, num_fluids_max
             #:for VAR in ['bc_x%alpha_rho_in','bc_x%alpha_in','bc_y%alpha_rho_in','bc_y%alpha_in','bc_z%alpha_rho_in','bc_z%alpha_in']
-                call MPI_BCAST(${VAR}$ (i), 1, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
+                call MPI_BCAST(${VAR}$ (i), 1, mpi_p, 0, MPI_COMM_WORLD, ierr)
             #:endfor
         end do
