  integer, parameter :: INNER_POINT = 1
  integer, parameter :: ENLARGEMENT_POINT = 2
  integer, parameter :: BOUNDARY = -1
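  ! Point-type labels, as suggested by their use in this module: inner points lie inside
  ! the simulation box, enlargement points belong to the extra boundary layer added
  ! around it, and BOUNDARY marks points that are not inner points.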
  subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
    class(mesh_t),                      intent(inout) :: mesh
    type(namespace_t),                  intent(in)    :: namespace
    class(space_t),                     intent(in)    :: space
    class(box_t),               target, intent(in)    :: box
    class(coordinate_system_t), target, intent(in)    :: coord_system
    real(real64),                       intent(in)    :: spacing(1:space%dim)
    integer,                            intent(in)    :: enlarge(1:space%dim)

    integer :: idir, jj, delta
    real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)

    safe_allocate(mesh%spacing(1:space%dim))
    mesh%spacing = spacing

    mesh%use_curvilinear = coord_system%local_basis
    mesh%coord_system => coord_system

    mesh%idx%enlarge = enlarge
    select type (coord_system)

    do idir = 1, space%dim
      chi(idir) = real(jj, real64) * mesh%spacing(idir)
      if (mesh%use_curvilinear) then
        x = coord_system%to_cartesian(chi)

      mesh%idx%nr(2, idir) = jj - 1

    mesh%idx%nr(1, :) = -mesh%idx%nr(2, :)

    do idir = 1, space%periodic_dim
      if (mesh%idx%nr(2, idir) == 0) then
        mesh%idx%nr(2, idir) = 1
        mesh%idx%nr(1, idir) = -1
        spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
        spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))

      delta = minloc(spacing_new, dim = 1) - 2

      mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
      if (delta == -1) then
        mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
      else if (delta == 1) then
        mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
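      ! Worked example (hypothetical numbers): for box bounds of +/- 5.0, a requested
      ! spacing of 0.3 and nr(2, idir) = 16, the candidates 2*5.0/(2*16 + 1 - delta) for
      ! delta = -1, 0, 1 are 0.2941, 0.3030 and 0.3125; delta = 0 deviates least from 0.3,
      ! so the spacing becomes ~0.3030 and the index range stays at (-16:16).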
    if (any(abs(mesh%spacing - spacing) > 1.e-6_real64)) then
      call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')

    do idir = space%periodic_dim + 1, space%dim
      if (mesh%idx%nr(2, idir) == 0) then
        write(message(1), '(a,i2)') 'Spacing > box size in direction ', idir
    mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1

    mesh%idx%stride(:) = 1
    do idir = 2, space%dim+1
      mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
        (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
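    ! Worked example (hypothetical sizes): with ll = (/11, 13, 15/) and enlarge = (/2, 2, 2/)
    ! the strides become 1, 15, 15*17 = 255 and 255*19 = 4845, i.e. consecutive points along
    ! the first direction are contiguous and each further dimension makes a larger jump.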
  subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
    class(mesh_t),      intent(inout) :: mesh
    class(space_t),     intent(in)    :: space
    class(box_t),       intent(in)    :: box
    logical, optional,  intent(in)    :: regenerate

    real(real64) :: chi(1:space%dim)
    real(real64) :: pos(space%dim)
    integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
    integer(int64) :: global_size
    integer(int32) :: local_size
    integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
    integer :: ip, ib, ib2, np, np_boundary, ii
    type(lihash_t) :: spatial_to_boundary
    integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
    integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
    integer(int64), allocatable :: spatial_to_grid(:)
    integer,        allocatable :: sizes(:)
    integer(int64), allocatable :: offsets(:)
    integer :: size_boundary
    integer(int64), pointer :: ptr(:)
    type(mpi_grp_t) :: internode_grp, intranode_grp
    mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
    mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)

    grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
    mesh%idx%offset = grid_sizes/2

    if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
      write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
        " points in one direction for ", space%dim, " dimensions. This is not supported."

    global_size = product(int(grid_sizes, int64))

    mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64))/log(2.)))

    spatial_size = global_size

    spatial_size = 2**(space%dim*mesh%idx%bits)
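    ! The two assignments above correspond to the two index types: for a cubic index the
    ! spatial range equals the number of grid points, while a bit-interleaved
    ! (space-filling-curve) index needs 2**(dim*bits) slots. Illustration (hypothetical):
    ! grid_sizes = (/30, 40, 50/) gives bits = ceiling(log2(50)) = 6, hence
    ! 2**(3*6) = 262144 slots versus product(grid_sizes) = 60000.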
    if (.not. (iend - istart + 1 < huge(0_int32))) then
      write(message(1), '(A, I10, A, I2, A)') "Error: local grid too large, more than ", &
        huge(0_int32), " points. This is not supported. Maybe use more MPI ranks?"

    local_size = int(iend - istart + 1, int32)

    safe_allocate(grid_to_spatial_initial(1:local_size))

    do ispatial = istart, iend
      if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
      if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle

      chi = real(point, real64) * mesh%spacing
      pos = mesh%coord_system%to_cartesian(chi)
      if (.not. box%contains_point(pos)) cycle

      grid_to_spatial_initial(ip) = ispatial
      assert(ip + 1 < huge(ip))

    call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
    safe_deallocate_a(grid_to_spatial_initial)

    safe_allocate(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
    safe_deallocate_a(sizes)

    do ispatial = grid_to_spatial(1), grid_to_spatial(np)
      spatial_to_grid(ispatial) = -1
    spatial_to_grid(grid_to_spatial(ip)) = ip

    safe_allocate(boundary_to_spatial(1:size_boundary))
      do is = 1, stencil%size
        if (stencil%center == is) cycle
        point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)

        assert(ispatialb >= 0)
        if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
          ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
          if (spatial_to_grid(ispatialb) > 0) cycle

        chi = real(point_stencil, real64) * mesh%spacing
        pos = mesh%coord_system%to_cartesian(chi)
        if (box%contains_point(pos)) cycle

        boundary_to_spatial(ib) = ispatialb

        if (ib >= size_boundary) then
          size_boundary = size_boundary * 2
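        ! Capacity of the boundary list grows geometrically: once ib reaches size_boundary
        ! the capacity is doubled, so appending boundary points has amortized linear cost.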
      safe_deallocate_a(spatial_to_grid)

    call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
    safe_deallocate_a(grid_to_spatial)

    call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
    mesh%np_global = sizes(0)
    mesh%np_global = mesh%np_global + sizes(ii)
    safe_deallocate_a(sizes)
    safe_deallocate_a(grid_to_spatial_reordered)

    call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
    safe_deallocate_a(boundary_to_spatial)

    call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
    safe_deallocate_a(boundary_to_spatial_reordered)

    mesh%np_part_global = mesh%np_global + sizes(0)
    mesh%np_part_global = mesh%np_part_global + sizes(ii)
    safe_deallocate_a(sizes)

    call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
    call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
      mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
    safe_allocate(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))

    call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
      mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)
    call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
      mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)

    call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
      mesh%idx%window_spatial_to_grid, ptr)
    mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
    safe_allocate(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
    do ispatial = 0, spatial_size-1
      mesh%idx%spatial_to_grid_global(ispatial) = 0
    do ipg = 1, mesh%np_part_global
      mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg

    if (intranode_grp%is_root()) then
      call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
      call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
    call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
    call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
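    ! With MPI shared memory, the global lookup tables exist once per node: only the
    ! intranode root ranks participate in the internode broadcast, and the window sync
    ! afterwards makes the data visible to the remaining ranks on the same node.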
    safe_deallocate_a(offsets)
    safe_deallocate_a(sizes)

    safe_deallocate_a(boundary_to_spatial)
    safe_deallocate_a(grid_to_spatial)

  subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
    class(mesh_t),          intent(inout) :: mesh
    class(space_t),         intent(in)    :: space
    type(mesh_t), optional, intent(in)    :: parent
    logical,      optional, intent(in)    :: regenerate
    integer :: ip, jj(space%dim), np

    mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)

    if (mesh%parallel_in_domains) then

    assert(mesh%np_part_global < huge(mesh%np_part))
    mesh%np_part = i8_to_i4(mesh%np_part_global)

    mesh%pv%np_global = mesh%np_global
    mesh%pv%np_bndry = mesh%np_part - mesh%np

    safe_allocate(mesh%x(1:space%dim, 1:mesh%np_part))
    safe_allocate(mesh%x_t(1:mesh%np_part, 1:space%dim))
    do ip = 1, mesh%np_part
      mesh%x_t(ip, 1:space%dim) = mesh%x(1:space%dim, ip)

    safe_allocate(mesh%chi(1:space%dim, 1:mesh%np_part))
    do ip = 1, mesh%np_part
      mesh%chi(:, ip) = jj*mesh%spacing

    if (mesh%coord_system%local_basis) then
      safe_allocate(mesh%jacobian_inverse(1:space%dim, 1:space%dim, np))
      mesh%jacobian_inverse(:, :, ip) = mesh%coord_system%jacobian_inverse(mesh%chi(:, ip))
    subroutine do_partition()
      integer :: jj, ipart, jpart
      integer(int64) :: ipg, jpg
      integer,        allocatable :: gindex(:), gedges(:)
      logical,        allocatable :: nb(:, :)
      integer :: idx(space%dim), jx(space%dim)
      type(mpi_comm) :: graph_comm
      integer :: iedge, nnb
      logical :: use_topo, reorder, partition_print
      logical :: has_virtual_partition = .false.
      type(restart_t) :: restart_load, restart_dump
      integer,        allocatable :: part_vec(:)
      call restart_load%end()

      call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)

      if (vsize /= mesh%mpi_grp%size) then
        write(message(1), '(a,I7)') "Changing the partition size to", vsize
        write(message(2), '(a)') "The execution will crash."
        has_virtual_partition = .true.
      else
        has_virtual_partition = .false.
      end if

      if (.not. present(parent)) then

      call restart_dump%end()

      if (has_virtual_partition) then
        write(message(1), '(a)') "Execution has ended."
        write(message(2), '(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."

      call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)

      safe_allocate(part_vec(1:mesh%np_global))

      safe_allocate(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))

      do ipg = 1, mesh%np_global
        ipart = part_vec(ipg)
        do jj = 1, stencil%size
          jx = idx + stencil%points(:, jj)
          if (jpg > 0 .and. jpg <= mesh%np_global) then
            jpart = part_vec(jpg)
            if (ipart /= jpart) nb(ipart, jpart) = .true.

      safe_deallocate_a(part_vec)

      safe_allocate(gindex(1:mesh%mpi_grp%size))
      safe_allocate(gedges(1:count(nb)))

      do ipart = 1, mesh%mpi_grp%size
        do jpart = 1, mesh%mpi_grp%size
          if (nb(ipart, jpart)) then
            gedges(iedge) = jpart - 1
        gindex(ipart) = iedge

      assert(iedge == count(nb))

      call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm)
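      ! gindex/gedges describe the partition adjacency in the format expected by
      ! MPI_Graph_create: gindex(ipart) is the cumulative number of edges up to and
      ! including node ipart, and gedges lists the 0-based neighbour ranks.
      ! Illustration (hypothetical): with 3 partitions where 1<->2 and 2<->3 are
      ! neighbours, gindex = (/1, 3, 4/) and gedges = (/1, 0, 2, 1/).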
      safe_deallocate_a(nb)
      safe_deallocate_a(gindex)
      safe_deallocate_a(gedges)

      call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil, &
        space, mesh%partition, mesh%pv, namespace)

      jpart = mesh%pv%partno
      do ipart = 1, mesh%pv%npart
        if (ipart == jpart) cycle
        if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
      assert(nnb >= 0 .and. nnb < mesh%pv%npart)

      mesh%np = mesh%pv%np_local
      mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry

      if (partition_print) then

      if (mesh%use_curvilinear) np = mesh%np_part
      if (mesh%np_part == 0) np = 0

      safe_allocate(mesh%vol_pp(1:np))

      mesh%vol_pp(ip) = product(mesh%spacing)
      mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%jacobian_determinant(mesh%chi(:, ip))

      if (mesh%use_curvilinear .or. mesh%np_part == 0) then
        mesh%volume_element = m_one
      else
        mesh%volume_element = mesh%vol_pp(1)
      end if
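      ! On a uniform grid every point carries the same volume product(spacing), e.g.
      ! (hypothetical) spacing = 0.2 in 3D gives vol_pp = 0.2**3 = 8.0e-3. With
      ! curvilinear coordinates each point is additionally scaled by the local Jacobian
      ! determinant, so no single volume_element is meaningful and m_one is stored instead.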
  subroutine rebalance_array(data_input, data_output, output_sizes)
    integer(int64), contiguous,            intent(in)  :: data_input(:)
    integer(int64), allocatable,           intent(out) :: data_output(:)
    integer,        allocatable, optional, intent(out) :: output_sizes(:)

    integer,        allocatable :: initial_sizes(:), final_sizes(:)
    integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
    integer,        allocatable :: scounts(:), sdispls(:)
    integer,        allocatable :: rcounts(:), rdispls(:)
    integer(int64) :: itmp
    safe_allocate(initial_sizes(0:mpi_world%size-1))
    call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)

    safe_allocate(initial_offsets(0:mpi_world%size))
    initial_offsets(0) = 0
    initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)

    safe_allocate(final_offsets(0:mpi_world%size))
    safe_allocate(final_sizes(0:mpi_world%size-1))

    final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
    assert(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
    final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)

    safe_allocate(scounts(0:mpi_world%size-1))
    safe_allocate(sdispls(0:mpi_world%size-1))
    safe_allocate(rcounts(0:mpi_world%size-1))
    safe_allocate(rdispls(0:mpi_world%size-1))

    itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
      max(final_offsets(irank), initial_offsets(mpi_world%rank))
    assert(itmp < huge(0_int32))
    scounts(irank) = int(itmp, int32)

    sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)

    assert(sum(int(scounts, int64)) < huge(0_int32))
    assert(sum(scounts) == initial_sizes(mpi_world%rank))

    itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
      max(final_offsets(mpi_world%rank), initial_offsets(irank))
    assert(itmp < huge(0_int32))
    rcounts(irank) = int(itmp, int32)

    rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)

    assert(sum(rcounts) == final_sizes(mpi_world%rank))

    safe_allocate(data_output(1:final_sizes(mpi_world%rank)))
    call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
      data_output, rcounts, rdispls, mpi_integer8)
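    ! The target layout splits the total number of points evenly: rank irank should end up
    ! with the points in [final_offsets(irank), final_offsets(irank+1)). What this rank
    ! sends to irank is the overlap of that interval with its own initial range; an empty
    ! overlap means nothing is sent. Illustration (hypothetical, 2 ranks):
    ! initial_sizes = (/8, 4/) gives initial_offsets = (/0, 8, 12/) and
    ! final_offsets = (/0, 6, 12/), so rank 0 keeps 6 points and sends 2 to rank 1.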
    if (present(output_sizes)) then
      safe_allocate(output_sizes(0:mpi_world%size-1))
      output_sizes(:) = final_sizes(:)

    safe_deallocate_a(final_offsets)
    safe_deallocate_a(final_sizes)
    safe_deallocate_a(scounts)
    safe_deallocate_a(sdispls)
    safe_deallocate_a(rcounts)
    safe_deallocate_a(rdispls)
  subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
    class(space_t),              intent(in)  :: space
    type(index_t),               intent(in)  :: idx
    integer(int64),              intent(in)  :: grid_to_spatial(:)
    integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)

    integer :: bsize(space%dim), order, default
    integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
    integer, parameter :: &
      order_original = 2, &
    integer :: point(1:space%dim)
    integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
    integer,        allocatable :: index_map(:), indices(:)
    integer(int64), allocatable :: grid_to_spatial_recv(:)
    integer,        allocatable :: initial_sizes(:)
    integer(int64), allocatable :: initial_offsets(:)
    integer(int64) :: istart, iend, indstart, indend, spatial_size
    integer :: irank, local_size, num_recv
    integer :: iunique, nunique
    logical :: increase_with_dimension
    integer,        allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
    integer(int64), allocatable :: spatial_cutoff(:)
    default = order_blocks

    if (space%dim == 1) then
      order = order_original

    call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
    increase_with_dimension = direction == 1
    if (direction /= 1 .and. direction /= 2) then

    case (order_original)
      safe_allocate(grid_to_spatial_reordered(1:size(grid_to_spatial)))
      grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))
    case (order_blocks, order_cube)
      if (order == order_cube) then
        bsize = idx%nr(2, :) - idx%nr(1, :) + 1

      if (conf%target_states_block_size < 16) then
        bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
        if (space%dim > 1) bsize(2) = 4
        if (space%dim > 2) bsize(3:) = 10
      else
        bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
        if (space%dim > 1) bsize(2) = 15
        if (space%dim > 2) bsize(3:) = 15
      end if

      if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
        if (nn /= space%dim) then
          message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."

      number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1

      safe_allocate(initial_sizes(0:mpi_world%size-1))
      call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)

      safe_allocate(initial_offsets(0:mpi_world%size))
      initial_offsets(0) = 0
      initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)

      iend = initial_offsets(mpi_world%rank + 1) - 1
      assert(iend - istart + 1 < huge(0_int32))
      local_size = int(iend - istart + 1, int32)
      assert(local_size == initial_sizes(mpi_world%rank))

      safe_allocate(reorder_indices(1:local_size))
      safe_allocate(indices(1:local_size))
      safe_allocate(grid_to_spatial_reordered(1:local_size))

      do ip = 1, local_size
        point = point + idx%offset
        reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)

      call sort(reorder_indices, indices)
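      ! get_blocked_index assigns each shifted point a key that enumerates the grid block
      ! by block (blocks of extent bsize), so sorting by this key groups points that are
      ! close in space; sort returns in indices the permutation that orders
      ! reorder_indices, and the same permutation is applied to grid_to_spatial below.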
      do ip = 1, local_size
        grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))

      if (local_size > 0) then
        indstart = reorder_indices(1)
        indend = reorder_indices(local_size)
      else
        indstart = huge(1_int64)

      call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
      call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
      spatial_size = indend - indstart + 1

      safe_allocate(spatial_cutoff(0:mpi_world%size-1))
      spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart

      safe_allocate(scounts(0:mpi_world%size-1))
      safe_allocate(sdispls(0:mpi_world%size-1))
      safe_allocate(rcounts(0:mpi_world%size-1))
      safe_allocate(rdispls(0:mpi_world%size-1))

      do ip = 1, local_size
        if (reorder_indices(ip) >= spatial_cutoff(irank)) then
          do while (reorder_indices(ip) >= spatial_cutoff(irank))
        scounts(irank) = scounts(irank) + 1

      safe_deallocate_a(spatial_cutoff)
      assert(sum(scounts) == local_size)

      sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)

      call mpi_world%alltoall(scounts, 1, mpi_integer, &
        rcounts, 1, mpi_integer)

      rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)

      num_recv = max(sum(rcounts), 1)

      safe_allocate(reorder_recv(1:num_recv))
      call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
        reorder_recv, rcounts, rdispls, mpi_integer8)
      safe_deallocate_a(reorder_indices)

      safe_allocate(grid_to_spatial_recv(1:num_recv))
      call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
        grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
      safe_deallocate_a(grid_to_spatial_reordered)

      safe_allocate(reorder_indices(1:num_recv))
      safe_allocate(index_map(1:num_recv))
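      ! Once reorder_indices holds the merged, sorted keys received from all ranks and
      ! index_map their positions in grid_to_spatial_recv, the loops below keep only the
      ! first occurrence of each distinct key; nunique counts the distinct points that
      ! end up on this rank.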
      if (sum(rcounts) > 0) then
        do ipg = 2, sum(rcounts)
          if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
            nunique = nunique + 1

        safe_allocate(grid_to_spatial_reordered(1:nunique))
        grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
        do ipg = 2, sum(rcounts)
          if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
            iunique = iunique + 1
            grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
      else
        safe_allocate(grid_to_spatial_reordered(1:0))
      end if

      safe_deallocate_a(initial_offsets)
      safe_deallocate_a(initial_sizes)
      safe_deallocate_a(reorder_indices)
      safe_deallocate_a(reorder_recv)
      safe_deallocate_a(grid_to_spatial_recv)
      safe_deallocate_a(index_map)
      safe_deallocate_a(indices)
      safe_deallocate_a(scounts)
      safe_deallocate_a(sdispls)
      safe_deallocate_a(rcounts)
      safe_deallocate_a(rdispls)
  subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)
    integer,                      intent(in)  :: local_size
    integer,         allocatable, intent(out) :: sizes(:)
    integer(int64),  allocatable, intent(out) :: offsets(:)
    type(mpi_grp_t), optional,    intent(in)  :: mpi_grp

    if (present(mpi_grp)) then

    safe_allocate(sizes(0:mpi_grp_%size-1))
    call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)

    safe_allocate(offsets(0:mpi_grp_%size))
    do irank = 1, mpi_grp_%size
      offsets(irank) = offsets(irank-1) + sizes(irank-1)
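    ! Offsets are the exclusive prefix sum of the per-rank sizes, e.g. (hypothetical)
    ! sizes = (/3, 5, 2/) yields offsets = (/0, 3, 8, 10/): offsets(irank) is where rank
    ! irank's block starts and offsets(mpi_grp_%size) is the global total.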