  integer, parameter :: INNER_POINT = 1
  integer, parameter :: ENLARGEMENT_POINT = 2
  integer, parameter :: BOUNDARY = -1
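  ! ---------------------------------------------------------
  !> First stage mesh initialization: determine the spacing and the index range
  !> (number of points per direction) of the grid stored in mesh%idx.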
  subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
    class(mesh_t),                      intent(inout) :: mesh
    type(namespace_t),                  intent(in)    :: namespace
    class(space_t),                     intent(in)    :: space
    class(box_t),               target, intent(in)    :: box
    class(coordinate_system_t), target, intent(in)    :: coord_system
    real(real64),                       intent(in)    :: spacing(1:space%dim)
    integer,                            intent(in)    :: enlarge(1:space%dim)
    integer :: idir, jj, delta
    real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
    safe_allocate(mesh%spacing(1:space%dim))
    mesh%spacing = spacing

    mesh%use_curvilinear = coord_system%local_basis
    mesh%coord_system => coord_system

    mesh%idx%enlarge = enlarge
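    ! get the box bounds along the axes that generate the grid points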
    select type (coord_system)
    ! determine the number of points in each direction
    do idir = 1, space%dim
      ! ...
      chi(idir) = real(jj, real64) * mesh%spacing(idir)
      if (mesh%use_curvilinear) then
        x = coord_system%to_cartesian(chi)
        ! ...
      end if
      mesh%idx%nr(2, idir) = jj - 1
    end do

    ! the mesh is symmetric with respect to the origin
    mesh%idx%nr(1, :) = -mesh%idx%nr(2, :)
    ! a couple of things have to be adjusted for the periodic directions
    do idir = 1, space%periodic_dim
      if (mesh%idx%nr(2, idir) == 0) then
        ! this happens if the spacing is larger than the box size
        mesh%idx%nr(2, idir) =  1
        mesh%idx%nr(1, idir) = -1
      end if
      ! adjust the spacing so that it is commensurate with the box: scan the grid
      ! sizes around the selected one and keep the one whose spacing is closest
      ! to the requested value
      do delta = -1, 1
        spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
        spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
      end do

      delta = minloc(spacing_new, dim = 1) - 2
      mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)

      ! adjust the grid size accordingly
      if (delta == -1) then
        mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
      else if (delta == 1) then
        mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
      end if
    end do
    if (any(abs(mesh%spacing - spacing) > 1.e-6_real64)) then
      call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
      call messages_warning(namespace=namespace)
    end if
    do idir = space%periodic_dim + 1, space%dim
      if (mesh%idx%nr(2, idir) == 0) then
        write(message(1), '(a,i2)') 'Spacing > box size in direction ', idir
        call messages_fatal(1, namespace=namespace)
      end if
    end do
    mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1

    ! compute the strides for the cubic index
    mesh%idx%stride(:) = 1
    do idir = 2, space%dim + 1
      mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
        (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
    end do
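    ! With these strides, a point with integer coordinates ix (counted from the lower
    ! bound of the enlarged index range) presumably maps to the linear cubic index
    ! 1 + sum over idir of ix(idir)*stride(idir); the conversion itself is implemented
    ! in the index module.

  end subroutine mesh_init_stage_1

  ! ---------------------------------------------------------
  !> This subroutine creates the global array of spatial indices and the inverse mapping.
  subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)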
    class(mesh_t),     intent(inout) :: mesh
    type(namespace_t), intent(in)    :: namespace
    class(space_t),    intent(in)    :: space
    class(box_t),      intent(in)    :: box
    type(stencil_t),   intent(in)    :: stencil
    logical, optional, intent(in)    :: regenerate
    real(real64) :: chi(1:space%dim)
    real(real64) :: pos(space%dim)
    integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
    integer(int64) :: global_size
    integer(int32) :: local_size
    integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
    integer :: ip, ib, ib2, np, np_boundary, ii
    type(lihash_t) :: spatial_to_boundary
    integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
    integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
    integer(int64), allocatable :: spatial_to_grid(:)
    integer, allocatable :: sizes(:)
    integer(int64), allocatable :: offsets(:)
    integer :: size_boundary
    integer(int64), pointer :: ptr(:)
    type(mpi_grp_t) :: internode_grp, intranode_grp
    ! enlarge the index range to make room for the boundary points
    mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
    mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
    grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
    mesh%idx%offset = grid_sizes/2

    if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
      write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
        " points in one direction for ", space%dim, " dimensions. This is not supported."
      call messages_fatal(1, namespace=namespace)
    end if

    global_size = product(int(grid_sizes, int64))

    ! number of bits per dimension needed to encode the integer coordinates
    mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64))/log(2.)))
    ! size of the spatial index space: the global grid size for a cubic index,
    ! the full 2^(dim*bits) cube for a Hilbert index
    if (mesh%idx%type == idx_cubic) then
      spatial_size = global_size
    else
      spatial_size = 2**(space%dim*mesh%idx%bits)
    end if
    ! istart and iend delimit this rank's block of the spatial index range
    assert(iend - istart + 1 < huge(0_int32))
    local_size = int(iend - istart + 1, int32)

    safe_allocate(grid_to_spatial_initial(1:local_size))
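    ! loop over this rank's block of spatial indices and keep those whose points
    ! lie inside the simulation box: these are the inner grid points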
    do ispatial = istart, iend
      call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
      if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
      if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle

      chi = real(point, real64) * mesh%spacing
      pos = mesh%coord_system%to_cartesian(chi)
      if (.not. box%contains_point(pos)) cycle

      grid_to_spatial_initial(ip) = ispatial
      assert(ip + 1 < huge(ip))
      ip = ip + 1
    end do
    safe_deallocate_a(grid_to_spatial_initial)

    ! local inverse map: spatial index -> local grid index (-1 if there is no point)
    safe_allocate(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
    safe_deallocate_a(sizes)
    do ispatial = grid_to_spatial(1), grid_to_spatial(np)
      spatial_to_grid(ispatial) = -1
    end do
    do ip = 1, np
      spatial_to_grid(grid_to_spatial(ip)) = ip
    end do
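    ! find the boundary points: stencil neighbours of inner points that are neither
    ! inner points themselves nor inside the box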
    safe_allocate(boundary_to_spatial(1:size_boundary))
    ib = 1
    do ip = 1, np
      call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
      do is = 1, stencil%size
        if (stencil%center == is) cycle
        point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)

        ! skip neighbours that are themselves inner grid points
        call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
        assert(ispatialb >= 0)
        if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
          ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
          if (spatial_to_grid(ispatialb) > 0) cycle
        end if

        ! skip neighbours that are still inside the box
        chi = real(point_stencil, real64) * mesh%spacing
        pos = mesh%coord_system%to_cartesian(chi)
        if (box%contains_point(pos)) cycle

        ! found a boundary point; store it and remember it in the hash table
        boundary_to_spatial(ib) = ispatialb
        call lihash_insert(spatial_to_boundary, ispatialb, ib)
        ib = ib + 1

        ! double the array size when it is full
        if (ib >= size_boundary) then
          size_boundary = size_boundary * 2
          call make_array_larger(boundary_to_spatial, size_boundary)
        end if
      end do
    end do
    np_boundary = ib - 1
    call lihash_end(spatial_to_boundary)

    safe_deallocate_a(spatial_to_grid)
    ! reorder the inner points and rebalance them over the ranks
    call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
    safe_deallocate_a(grid_to_spatial)

    call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)

    ! total number of inner points
    mesh%np_global = sizes(0)
    do ii = 1, mpi_world%size - 1
      mesh%np_global = mesh%np_global + sizes(ii)
    end do
    safe_deallocate_a(sizes)
    safe_deallocate_a(grid_to_spatial_reordered)
    ! the same for the boundary points
    call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
    safe_deallocate_a(boundary_to_spatial)

    call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
    safe_deallocate_a(boundary_to_spatial_reordered)

    ! total number of points including the boundary
    mesh%np_part_global = mesh%np_global + sizes(0)
    do ii = 1, mpi_world%size - 1
      mesh%np_part_global = mesh%np_part_global + sizes(ii)
    end do
    safe_deallocate_a(sizes)
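    ! gather the reordered spatial indices of all ranks into the global
    ! grid_to_spatial array, inner points first, then boundary points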
#ifdef HAVE_MPI
    ! the global array lives in an MPI shared-memory window, one copy per node
    call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
      mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
#else
    safe_allocate(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
#endif
    call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
      mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)
    call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
      mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)
    ! inverse map: spatial index -> global grid index (0 if there is no point)
#ifdef HAVE_MPI
    call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
      mesh%idx%window_spatial_to_grid, ptr)
    mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
#else
    safe_allocate(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
#endif
    do ispatial = 0, spatial_size-1
      mesh%idx%spatial_to_grid_global(ispatial) = 0
    end do
    do ipg = 1, mesh%np_part_global
      mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
    end do
    ! broadcast the global maps to the first rank of every node
    if (intranode_grp%rank == 0) then
      call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
      call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
    end if
    safe_deallocate_a(offsets)
    safe_deallocate_a(sizes)

    safe_deallocate_a(boundary_to_spatial)
    safe_deallocate_a(grid_to_spatial)
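
  end subroutine mesh_init_stage_2

  ! ---------------------------------------------------------
  !> When running parallel in domains, stencil and np_stencil are needed to compute the ghost points.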
  subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
    class(mesh_t),          intent(inout) :: mesh
    type(namespace_t),      intent(in)    :: namespace
    class(space_t),         intent(in)    :: space
    type(stencil_t),        intent(in)    :: stencil
    type(multicomm_t),      intent(in)    :: mc
    type(mesh_t), optional, intent(in)    :: parent
    logical,      optional, intent(in)    :: regenerate
    ! check whether we are running parallel in domains
    mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)

    if (mesh%parallel_in_domains) then
      call do_partition()
    else
      ! when running serially, local and global sizes coincide
      assert(mesh%np_part_global < huge(mesh%np_part))
      mesh%np      = i8_to_i4(mesh%np_global)
      mesh%np_part = i8_to_i4(mesh%np_part_global)

      mesh%pv%np_global = mesh%np_global
      mesh%pv%np_bndry = mesh%np_part - mesh%np
    end if
    ! compute the Cartesian coordinates of all local points
    safe_allocate(mesh%x(1:mesh%np_part, 1:space%dim))
    do ip = 1, mesh%np_part
      mesh%x(ip, 1:space%dim) = m_zero
    end do

    do ip = 1, mesh%np_part
      mesh%x(ip, 1:space%dim) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
    end do
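
    call mesh_get_vol_pp()

  contains

    ! ---------------------------------------------------------
    !> compute the domain partition and initialize the parallel vector information (mesh%pv)
    subroutine do_partition()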
      integer :: jj, ipart, jpart
      integer(int64) :: ipg
      integer, allocatable :: gindex(:), gedges(:)
      logical, allocatable :: nb(:, :)
      integer :: idx(space%dim), jx(space%dim)
      type(mpi_comm) :: graph_comm
      integer :: iedge, nnb
      logical :: use_topo, reorder, partition_print
      logical :: has_virtual_partition = .false.
      integer :: vsize
      type(restart_t) :: restart_load, restart_dump
      integer, allocatable :: part_vec(:)
      call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)

      if (vsize /= mesh%mpi_grp%size) then
        write(message(1), '(a,I7)') "Changing the partition size to", vsize
        write(message(2), '(a)') "The execution will crash."
        call messages_warning(2, namespace=namespace)
        has_virtual_partition = .true.
      else
        has_virtual_partition = .false.
      end if
      if (.not. present(parent)) then
        ! when a virtual partition was requested, only the partition itself is computed
        ! and the execution stops here
        if (has_virtual_partition) then
          write(message(1), '(a)') "Execution has ended."
          write(message(2), '(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
          ! ...
        end if
      call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)
        safe_allocate(part_vec(1:mesh%np_part_global))
        call partition_get_global(mesh%partition, part_vec)

        ! build the adjacency matrix between partitions: two partitions are
        ! neighbours if a stencil point of one falls into the other
        safe_allocate(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
        nb = .false.
        do ipg = 1, mesh%np_global
          ipart = part_vec(ipg)
          call mesh_global_index_to_coords(mesh, ipg, idx)
          do jj = 1, stencil%size
            jx = idx + stencil%points(:, jj)
            if (all(jx >= mesh%idx%nr(1, :)) .and. all(jx <= mesh%idx%nr(2, :))) then
              jpart = part_vec(mesh_global_index_from_coords(mesh, jx))
              if (ipart /= jpart) nb(ipart, jpart) = .true.
            end if
          end do
        end do
        safe_deallocate_a(part_vec)
        ! store the graph in the format expected by mpi_graph_create
        safe_allocate(gindex(1:mesh%mpi_grp%size))
        safe_allocate(gedges(1:count(nb)))
        iedge = 0
        do ipart = 1, mesh%mpi_grp%size
          do jpart = 1, mesh%mpi_grp%size
            if (nb(ipart, jpart)) then
              iedge = iedge + 1
              gedges(iedge) = jpart - 1
            end if
          end do
          gindex(ipart) = iedge
        end do
        assert(iedge == count(nb))

        call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm, mpi_err)

        safe_deallocate_a(nb)
        safe_deallocate_a(gindex)
        safe_deallocate_a(gedges)
      call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil, &
        space, mesh%partition, mesh%pv, namespace)

      ! count the number of neighbouring partitions we exchange ghost points with
      nnb = 0
      jpart = mesh%pv%partno
      do ipart = 1, mesh%pv%npart
        if (ipart == jpart) cycle
        if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
      end do
      assert(nnb >= 0 .and. nnb < mesh%pv%npart)

      ! local number of points
      mesh%np = mesh%pv%np_local
      mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry
      if (partition_print) then
        ! ...
      end if
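    end subroutine do_partition

    ! ---------------------------------------------------------
    !> calculate the volume of integration associated with each grid point
    subroutine mesh_get_vol_pp()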
      integer :: jj(space%dim), ip, np
      real(real64) :: chi(space%dim)

      if (mesh%use_curvilinear) np = mesh%np_part
      if (mesh%np_part == 0) np = 0
      safe_allocate(mesh%vol_pp(1:np))

      do ip = 1, np
        mesh%vol_pp(ip) = product(mesh%spacing)
      end do

      do ip = 1, np
        call mesh_local_index_to_coords(mesh, ip, jj)
        chi = jj*mesh%spacing
        mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%det_Jac(mesh%x(ip, :), chi)
      end do
      if (mesh%use_curvilinear .or. mesh%np_part == 0) then
        mesh%volume_element = m_one
      else
        mesh%volume_element = mesh%vol_pp(1)
      end if
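    end subroutine mesh_get_vol_pp

  end subroutine mesh_init_stage_3

  ! ---------------------------------------------------------
  !> re-distribute the points of a distributed array to improve the load balancing
  subroutine rebalance_array(data_input, data_output, output_sizes)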
    integer(int64), contiguous,            intent(in)  :: data_input(:)
    integer(int64), allocatable,           intent(out) :: data_output(:)
    integer,        allocatable, optional, intent(out) :: output_sizes(:)

    integer, allocatable :: initial_sizes(:), final_sizes(:)
    integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
    integer, allocatable :: scounts(:), sdispls(:)
    integer, allocatable :: rcounts(:), rdispls(:)
    integer :: irank
    integer(int64) :: itmp
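    ! Strategy: gather the current sizes of the array on all ranks, compute evenly
    ! balanced target offsets, and move the data with an all-to-all exchange so that
    ! every rank ends up with (nearly) the same number of elements.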
    ! current distribution of the array
    safe_allocate(initial_sizes(0:mpi_world%size-1))
    call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
    safe_allocate(initial_offsets(0:mpi_world%size))
    initial_offsets(0) = 0
    do irank = 1, mpi_world%size
      initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
    end do
    ! balanced target distribution
    safe_allocate(final_offsets(0:mpi_world%size))
    safe_allocate(final_sizes(0:mpi_world%size-1))
    do irank = 0, mpi_world%size
      final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
    end do
    do irank = 0, mpi_world%size - 1
      assert(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
      final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)
    end do
    safe_allocate(scounts(0:mpi_world%size-1))
    safe_allocate(sdispls(0:mpi_world%size-1))
    safe_allocate(rcounts(0:mpi_world%size-1))
    safe_allocate(rdispls(0:mpi_world%size-1))

    ! number of elements this rank sends to each other rank: the overlap of the
    ! local part of the initial distribution with the target part of that rank
    do irank = 0, mpi_world%size - 1
      itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
        max(final_offsets(irank), initial_offsets(mpi_world%rank))
      assert(itmp < huge(0_int32))
      ! ...
      scounts(irank) = int(itmp, int32)
    end do

    sdispls(0) = 0
    do irank = 1, mpi_world%size - 1
      sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
    end do
    assert(sum(int(scounts, int64)) < huge(0_int32))
    assert(sum(scounts) == initial_sizes(mpi_world%rank))
    ! number of elements this rank receives from each other rank
    do irank = 0, mpi_world%size - 1
      itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
        max(final_offsets(mpi_world%rank), initial_offsets(irank))
      assert(itmp < huge(0_int32))
      ! ...
      rcounts(irank) = int(itmp, int32)
    end do

    rdispls(0) = 0
    do irank = 1, mpi_world%size - 1
      rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
    end do
    assert(sum(rcounts) == final_sizes(mpi_world%rank))

    safe_allocate(data_output(1:final_sizes(mpi_world%rank)))
    call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
      data_output, rcounts, rdispls, mpi_integer8)
    if (present(output_sizes)) then
      safe_allocate(output_sizes(0:mpi_world%size-1))
      output_sizes(:) = final_sizes(:)
    end if
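
  end subroutine rebalance_array

  ! ---------------------------------------------------------
  !> reorder the points in the grid according to the variables MeshOrder and MeshLocalOrder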
  subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
    type(namespace_t),           intent(in)  :: namespace
    class(space_t),              intent(in)  :: space
    type(index_t),               intent(in)  :: idx
    integer(int64),              intent(in)  :: grid_to_spatial(:)
    integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)
    integer :: bsize(space%dim), order, default
    integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
    integer, parameter ::   &
      order_blocks = 1,     &
      order_original = 2,   &
      order_cube = 3
    integer :: direction
    type(block_t) :: blk
    integer :: point(1:space%dim)
    integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
    integer, allocatable :: index_map(:), indices(:)
    integer(int64), allocatable :: grid_to_spatial_recv(:)
    integer, allocatable :: initial_sizes(:)
    integer(int64), allocatable :: initial_offsets(:)
    integer(int64) :: istart, iend, indstart, indend, spatial_size
    integer :: irank, local_size, num_recv
    integer :: iunique, nunique
    logical :: increase_with_dimension
    integer, allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
    integer(int64), allocatable :: spatial_cutoff(:)
    default = order_blocks
    ! there is no point in using blocks in one dimension
    if (space%dim == 1) then
      order = order_original
    else
      call parse_variable(namespace, 'MeshOrder', default, order)
    end if

    call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
    increase_with_dimension = direction == 1
    if (direction /= 1 .and. direction /= 2) then
      call messages_input_error(namespace, 'MeshBlockDirection')
    end if
    select case (order)
    case (order_original)
      ! keep the original (spatial-index) order
      safe_allocate(grid_to_spatial_reordered(1:size(grid_to_spatial)))
      grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))

    case (order_blocks, order_cube)
      if (order == order_cube) then
        ! one single block spanning the full grid
        bsize = idx%nr(2, :) - idx%nr(1, :) + 1
      else
        ! default block sizes, chosen according to the states block size
        if (conf%target_states_block_size < 16) then
          bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
          if (space%dim > 1) bsize(2) = 4
          if (space%dim > 2) bsize(3:) = 10
        else
          bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
          if (space%dim > 1) bsize(2) = 15
          if (space%dim > 2) bsize(3:) = 15
        end if

        ! the block size can also be set explicitly through the MeshBlockSize block
        if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
          ! ...
          if (nn /= space%dim) then
            message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."
            call messages_fatal(1, namespace=namespace)
          end if
          ! ...
        end if
      end if

      number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1
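      ! each point is then labelled with the index of the block it belongs to, and the
      ! points are sorted by this blocked index so that the points of a block stay together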
      ! get the global distribution of the points over the ranks
      safe_allocate(initial_sizes(0:mpi_world%size-1))
      call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
      safe_allocate(initial_offsets(0:mpi_world%size))
      initial_offsets(0) = 0
      do irank = 1, mpi_world%size
        initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
      end do

      ! local range of points
      istart = initial_offsets(mpi_world%rank)
      iend = initial_offsets(mpi_world%rank + 1) - 1
      assert(iend - istart + 1 < huge(0_int32))
      local_size = int(iend - istart + 1, int32)
      assert(local_size == initial_sizes(mpi_world%rank))
      ! compute the blocked index of every local point
      safe_allocate(reorder_indices(1:local_size))
      safe_allocate(indices(1:local_size))
      safe_allocate(grid_to_spatial_reordered(1:local_size))
      do ip = 1, local_size
        call index_spatial_to_point(idx, space%dim, grid_to_spatial(ip), point)
        point = point + idx%offset
        reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)
      end do

      ! sort the points by their blocked index
      call sort(reorder_indices, indices)
      do ip = 1, local_size
        grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))
      end do
      ! get the global range of blocked indices
      indstart = reorder_indices(1)
      indend = reorder_indices(local_size)
      call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
      call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
      spatial_size = indend - indstart + 1

      ! the blocked index range is split evenly over the ranks
      safe_allocate(spatial_cutoff(0:mpi_world%size-1))
      do irank = 0, mpi_world%size - 1
        spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart
      end do
      safe_allocate(scounts(0:mpi_world%size-1))
      safe_allocate(sdispls(0:mpi_world%size-1))
      safe_allocate(rcounts(0:mpi_world%size-1))
      safe_allocate(rdispls(0:mpi_world%size-1))

      ! count how many points go to each rank; the points are already sorted,
      ! so we only need to advance the target rank at each cutoff
      scounts = 0
      irank = 0
      do ip = 1, local_size
        if (reorder_indices(ip) >= spatial_cutoff(irank)) then
          do while (reorder_indices(ip) >= spatial_cutoff(irank))
            irank = irank + 1
          end do
        end if
        scounts(irank) = scounts(irank) + 1
      end do
      safe_deallocate_a(spatial_cutoff)
      assert(sum(scounts) == local_size)
      sdispls(0) = 0
      do irank = 1, mpi_world%size - 1
        sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
      end do

      ! exchange the counts
      call mpi_world%alltoall(scounts, 1, mpi_integer, &
        rcounts, 1, mpi_integer)

      rdispls(0) = 0
      do irank = 1, mpi_world%size - 1
        rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
      end do

      num_recv = max(sum(rcounts), 1)

      ! exchange the blocked indices and the corresponding spatial indices
      safe_allocate(reorder_recv(1:num_recv))
      call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
        reorder_recv, rcounts, rdispls, mpi_integer8)
      safe_deallocate_a(reorder_indices)

      safe_allocate(grid_to_spatial_recv(1:num_recv))
      call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
        grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
      safe_deallocate_a(grid_to_spatial_reordered)
      ! merge the received chunks (each already sorted) into one sorted array
      safe_allocate(reorder_indices(1:num_recv))
      safe_allocate(index_map(1:num_recv))
      if (sum(rcounts) > 0) then
        call merge_sorted_arrays(reorder_recv, rcounts, reorder_indices, index_map)

        ! count the unique blocked indices
        nunique = 1
        do ipg = 2, sum(rcounts)
          if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
            nunique = nunique + 1
          end if
        end do

        ! copy the points corresponding to the unique indices, in sorted order
        safe_allocate(grid_to_spatial_reordered(1:nunique))
        iunique = 1
        grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
        do ipg = 2, sum(rcounts)
          if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
            iunique = iunique + 1
            grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
          end if
        end do
      else
        safe_allocate(grid_to_spatial_reordered(1:0))
      end if
      safe_deallocate_a(initial_offsets)
      safe_deallocate_a(initial_sizes)
      safe_deallocate_a(reorder_indices)
      safe_deallocate_a(reorder_recv)
      safe_deallocate_a(grid_to_spatial_recv)
      safe_deallocate_a(index_map)
      safe_deallocate_a(indices)
      safe_deallocate_a(scounts)
      safe_deallocate_a(sdispls)
      safe_deallocate_a(rcounts)
      safe_deallocate_a(rdispls)
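    end select

  end subroutine reorder_points

  ! ---------------------------------------------------------
  !> return the sizes and offsets of a distributed array for all tasks of an MPI group.
  subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)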
    integer,                      intent(in)  :: local_size
    integer,         allocatable, intent(out) :: sizes(:)
    integer(int64),  allocatable, intent(out) :: offsets(:)
    type(mpi_grp_t), optional,    intent(in)  :: mpi_grp

    integer :: irank
    type(mpi_grp_t) :: mpi_grp_
    if (present(mpi_grp)) then
      mpi_grp_ = mpi_grp
    else
      mpi_grp_ = mpi_world
    end if

    safe_allocate(sizes(0:mpi_grp_%size-1))
    call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)
    safe_allocate(offsets(0:mpi_grp_%size))
    offsets(0) = 0
    do irank = 1, mpi_grp_%size
      offsets(irank) = offsets(irank-1) + sizes(irank-1)
    end do

  end subroutine get_sizes_offsets