Octopus
mesh_init.F90
1!! Copyright (C) 2002-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
2!! Copyright (C) 2021 S. Ohlmann
3!!
4!! This program is free software; you can redistribute it and/or modify
5!! it under the terms of the GNU General Public License as published by
6!! the Free Software Foundation; either version 2, or (at your option)
7!! any later version.
8!!
9!! This program is distributed in the hope that it will be useful,
10!! but WITHOUT ANY WARRANTY; without even the implied warranty of
11!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12!! GNU General Public License for more details.
13!!
14!! You should have received a copy of the GNU General Public License
15!! along with this program; if not, write to the Free Software
16!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17!! 02110-1301, USA.
18!!
19
20#include "global.h"
21
24module mesh_init_oct_m
26 use box_oct_m
30 use debug_oct_m
31 use global_oct_m
32 use iihash_oct_m
33 use index_oct_m
34 use math_oct_m
36 use mesh_oct_m
40 use mpi_oct_m
45 use parser_oct_m
49 use sort_oct_m
50 use space_oct_m
52 use utils_oct_m
53
54 implicit none
55
56 private
57 public :: &
    mesh_init_stage_1,          &
    mesh_init_stage_2,          &
    mesh_init_stage_3
61
62
63 integer, parameter :: INNER_POINT = 1
64 integer, parameter :: ENLARGEMENT_POINT = 2
65 integer, parameter :: BOUNDARY = -1
66
67contains
68
69! ---------------------------------------------------------
!> First stage mesh initialization.
76 subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
77 class(mesh_t), intent(inout) :: mesh
78 type(namespace_t), intent(in) :: namespace
79 class(space_t), intent(in) :: space
80 class(box_t), target, intent(in) :: box
81 class(coordinate_system_t), target, intent(in) :: coord_system
82 real(real64), intent(in) :: spacing(1:space%dim)
83 integer, intent(in) :: enlarge(1:space%dim)
84
85 integer :: idir, jj, delta
86 real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
87 logical :: out
88
89 push_sub_with_profile(mesh_init_stage_1)
90
91 mesh%box => box
92
93 safe_allocate(mesh%spacing(1:space%dim))
94 mesh%spacing = spacing ! these values may be adjusted below
95
96 mesh%use_curvilinear = coord_system%local_basis
97 mesh%coord_system => coord_system
98
99 call index_init(mesh%idx, space%dim)
100 mesh%idx%enlarge = enlarge
101
102 ! get box bounds along the axes that generate the grid points
103 select type (coord_system)
104 class is (affine_coordinates_t)
105 box_bounds = box%bounds(coord_system%basis)
106 class default
107 box_bounds = box%bounds()
108 end select
109
110 ! adjust nr
111 do idir = 1, space%dim
112 chi = m_zero
113 ! the upper border
114 jj = 0
115 out = .false.
116 do while(.not.out)
117 jj = jj + 1
118 chi(idir) = real(jj, real64) * mesh%spacing(idir)
119 if (mesh%use_curvilinear) then
120 x = coord_system%to_cartesian(chi)
121 out = x(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
122 else
123 ! do the same comparison here as in simul_box_contains_points
124 out = chi(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
125 end if
126 end do
127 mesh%idx%nr(2, idir) = jj - 1
128 end do
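    ! Example for the loop above: with a box half-length of 5.0 along idir and a
    ! requested spacing of 0.3, the loop exits at jj = 17 (17*0.3 = 5.1 lies outside),
    ! so mesh%idx%nr(2, idir) = 16.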
129
130 ! we have a symmetric mesh (for now)
131 mesh%idx%nr(1,:) = -mesh%idx%nr(2,:)
132
133 ! we have to adjust a couple of things for the periodic directions
134 do idir = 1, space%periodic_dim
135 if (mesh%idx%nr(2, idir) == 0) then
136 ! this happens if Spacing > box size
137 mesh%idx%nr(2, idir) = 1
138 mesh%idx%nr(1, idir) = -1
139 end if
140
141 ! We have to adjust the spacing to be commensurate with the box,
142 ! for this we scan the possible values of the grid size around the
143 ! one we selected. We choose the size that has the spacing closest
144 ! to the requested one.
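    ! For example, with a half-length of 5.0 and a requested spacing of 0.3
    ! (so mesh%idx%nr(2, idir) = 16), the candidate spacings are 10.0/34, 10.0/33 and
    ! 10.0/32, i.e. about 0.294, 0.303 and 0.3125; delta = 0 is closest to 0.3, so the
    ! spacing becomes about 0.303.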
145 do delta = -1, 1
146 spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
147 spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
148 end do
149
150 delta = minloc(spacing_new, dim = 1) - 2
151
152 assert(delta >= -1)
153 assert(delta <= 1)
154
155 mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
157 ! we need to adjust the grid by adding or removing one point
158 if (delta == -1) then
159 mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
160 else if (delta == 1) then
161 mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
162 end if
163
164 end do
165
166 if ( any(abs(mesh%spacing - spacing) > 1.e-6_real64) ) then
167 call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
168 call messages_warning()
169 end if
170
171 do idir = space%periodic_dim + 1, space%dim
172 if (mesh%idx%nr(2, idir) == 0) then
173 write(message(1),'(a,i2)') 'Spacing > box size in direction ', idir
174 call messages_fatal(1, namespace=namespace)
175 end if
176 end do
177
178 mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
179 ! compute strides for cubic indices
180 mesh%idx%stride(:) = 1
181 do idir = 2, space%dim+1
182 mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
183 (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
184 end do
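  ! Example: for ll = (9, 9, 9) and enlarge = (2, 2, 2), each extended direction has
  ! 9 + 2*2 = 13 points, so stride = (1, 13, 169, 2197) is used to linearize cubic indices.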
185
186 pop_sub_with_profile(mesh_init_stage_1)
187 end subroutine mesh_init_stage_1
188
189 ! ---------------------------------------------------------
!> This subroutine creates the global array of spatial indices and the inverse mapping.
196 subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
197 class(mesh_t), intent(inout) :: mesh
198 type(namespace_t), intent(in) :: namespace
199 class(space_t), intent(in) :: space
200 class(box_t), intent(in) :: box
201 type(stencil_t), intent(in) :: stencil
202 logical, optional, intent(in) :: regenerate
203
204 integer :: is
205 real(real64) :: chi(1:space%dim)
206 real(real64) :: pos(space%dim)
207 integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
208 integer(int64) :: global_size
209 integer(int32) :: local_size
210 integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
211 integer :: ip, ib, ib2, np, np_boundary, ii
212 logical :: found
213 type(lihash_t) :: spatial_to_boundary
214 integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
215 integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
216 integer(int64), allocatable :: spatial_to_grid(:)
217 integer, allocatable :: sizes(:)
218 integer(int64), allocatable :: offsets(:)
219 integer :: size_boundary
220#ifdef HAVE_MPI
221 integer(int64), pointer :: ptr(:)
222 type(mpi_grp_t) :: internode_grp, intranode_grp
223#endif
224
225 push_sub_with_profile(mesh_init_stage_2)
226
227 if (.not. optional_default(regenerate, .false.)) then
228 ! enlarge mesh for boundary points
229 mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
230 mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
231 end if
232
233 !%Variable MeshIndexType
234 !%Type integer
235 !%Default idx_cubic
236 !%Section Mesh
237 !%Description
238!% Determines the index type. It must be the same when restarting a calculation.
239 !%Option idx_cubic 1
240 !% Cubic indices are used to map the spatial information to the grid points.
241 !%Option idx_hilbert 2
242 !% A Hilbert space-filling curve is used to map the spatial information to
243 !% the grid points.
244 !%End
245 call parse_variable(namespace, 'MeshIndexType', idx_cubic, mesh%idx%type)
246
247 grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
248 mesh%idx%offset = grid_sizes/2
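  ! the offset is added to the signed integer coordinates later on (see reorder_points
  ! below) so that the coordinates entering the spatial index are non-negative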
249 if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
250 write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
251 " points in one direction for ", space%dim, " dimensions. This is not supported."
252 call messages_fatal(1, namespace=namespace)
253 end if
254 global_size = product(int(grid_sizes, int64))
255 ! compute the bits per dimension: grid_sizes(i) <= 2**bits
256 mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64) )/log(2.)))
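  ! e.g. grid_sizes = (27, 27, 27) gives bits = 5, since 2**5 = 32 >= 27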
257
258 if (mesh%idx%type == idx_cubic) then
259 spatial_size = global_size
260 else if (mesh%idx%type == idx_hilbert) then
261 spatial_size = 2**(space%dim*mesh%idx%bits)
262 end if
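  ! For the cubic index, every point of the bounding box corresponds to exactly one
  ! spatial index, so spatial_size equals the number of points in the box. For the
  ! Hilbert index, the curve covers a full cube of 2**bits points per dimension, so the
  ! spatial index range can be considerably larger than the number of grid points.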
263
264 ! use block data decomposition of spatial indices
265 istart = spatial_size * mpi_world%rank/mpi_world%size
266 iend = spatial_size * (mpi_world%rank+1)/mpi_world%size - 1
267 assert(iend - istart + 1 < huge(0_int32))
268 local_size = int(iend - istart + 1, int32)
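  ! Example of the block decomposition: spatial_size = 10 on 4 ranks gives the index
  ! ranges [0,1], [2,4], [5,6] and [7,9] for ranks 0 to 3.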
269
270 safe_allocate(grid_to_spatial_initial(1:local_size))
271
272 ! get inner grid indices
273 ip = 1
274 do ispatial = istart, iend
275 call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
276 ! first check if point is outside bounding box
277 if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
278 if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle
279 ! then check if point is inside simulation box
280 chi = real(point, real64) * mesh%spacing
281 pos = mesh%coord_system%to_cartesian(chi)
282 if (.not. box%contains_point(pos)) cycle
283 grid_to_spatial_initial(ip) = ispatial
284 assert(ip + 1 < huge(ip))
285 ip = ip + 1
286 end do
287 np = ip - 1
288
289 call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
290 np = sizes(mpi_world%rank)
291
292 safe_deallocate_a(grid_to_spatial_initial)
293
294 safe_allocate(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
295 safe_deallocate_a(sizes)
296
297 !$omp parallel do
298 do ispatial = grid_to_spatial(1), grid_to_spatial(np)
299 spatial_to_grid(ispatial) = -1
300 end do
301 !$omp parallel do
302 do ip = 1, np
303 spatial_to_grid(grid_to_spatial(ip)) = ip
304 end do
305
306 ! get local boundary indices
307 call lihash_init(spatial_to_boundary)
308 size_boundary = np
309 safe_allocate(boundary_to_spatial(1:size_boundary))
310 ib = 1
311 do ip = 1, np
312 call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
313 do is = 1, stencil%size
314 if (stencil%center == is) cycle
315 point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)
316 ! check if point is in inner part
317 call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
318 assert(ispatialb >= 0)
319 if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
320 ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
321 if (spatial_to_grid(ispatialb) > 0) cycle
322 end if
323 ! then check if point is inside simulation box
324 chi = real(point_stencil, real64) * mesh%spacing
325 pos = mesh%coord_system%to_cartesian(chi)
326 if (box%contains_point(pos)) cycle
327 ! it has to be a boundary point now
328 ! check if already counted
329 ib2 = lihash_lookup(spatial_to_boundary, ispatialb, found)
330 if (found) cycle
331 boundary_to_spatial(ib) = ispatialb
332 call lihash_insert(spatial_to_boundary, ispatialb, ib)
333 ib = ib + 1
334 ! enlarge array
335 if (ib >= size_boundary) then
336 size_boundary = size_boundary * 2
337 call make_array_larger(boundary_to_spatial, size_boundary)
338 end if
339 end do
340 end do
341 np_boundary = ib - 1
342 call lihash_end(spatial_to_boundary)
343 safe_deallocate_a(spatial_to_grid)
344
345 ! reorder inner points
346 call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
347 safe_deallocate_a(grid_to_spatial)
348
349 call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
350 np = sizes(mpi_world%rank)
351 mesh%np_global = sizes(0)
352 do ii = 1, mpi_world%size - 1
353 mesh%np_global = mesh%np_global + sizes(ii)
354 end do
355 safe_deallocate_a(sizes)
356 safe_deallocate_a(grid_to_spatial_reordered)
357
358 ! reorder boundary points
359 call make_array_larger(boundary_to_spatial, np_boundary)
360 call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
361 safe_deallocate_a(boundary_to_spatial)
362
363 call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
364 safe_deallocate_a(boundary_to_spatial_reordered)
365
366 ! global grid size
367 np_boundary = sizes(mpi_world%rank)
368 mesh%np_part_global = mesh%np_global + sizes(0)
369 do ii = 1, mpi_world%size - 1
370 mesh%np_part_global = mesh%np_part_global + sizes(ii)
371 end do
372 safe_deallocate_a(sizes)
373
374
375 ! get global indices
376#ifdef HAVE_MPI
377 ! create shared memory window and fill it only on root
378 call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
379 call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
380 mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
381#else
382 safe_allocate(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
383#endif
384 ! inner grid
385 call get_sizes_offsets(np, sizes, offsets)
386 call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
387 mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)
388
389 ! boundary indices
390 call get_sizes_offsets(np_boundary, sizes, offsets)
391 call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
392 mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)
393
394 ! fill global hash map
395#ifdef HAVE_MPI
396 ! create shared memory window and fill it only on root
397 call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
398 mesh%idx%window_spatial_to_grid, ptr)
399 mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
400#else
401 safe_allocate(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
402#endif
403 if (mpi_grp_is_root(mpi_world)) then
404 ! fill only on root, then broadcast
405 !$omp parallel do
406 do ispatial = 0, spatial_size-1
407 mesh%idx%spatial_to_grid_global(ispatial) = 0
408 end do
409 !$omp parallel do
410 do ipg = 1, mesh%np_part_global
411 mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
412 end do
413 end if
414
415#ifdef HAVE_MPI
416 ! now broadcast the global arrays to local rank 0 on each node
417 if (intranode_grp%rank == 0) then
418 call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
419 call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
420 end if
421 call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
422 call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
423#endif
424
425 safe_deallocate_a(offsets)
426 safe_deallocate_a(sizes)
427
428 safe_deallocate_a(boundary_to_spatial)
429 safe_deallocate_a(grid_to_spatial)
430
431 pop_sub_with_profile(mesh_init_stage_2)
432 end subroutine mesh_init_stage_2
433
434! ---------------------------------------------------------
!> Third stage of mesh initialization: set up the domain decomposition (mesh partition
!> and parallel vector layout, when running parallel in domains) and compute the point
!> coordinates and the volume elements.
439! ---------------------------------------------------------
440 subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
441 class(mesh_t), intent(inout) :: mesh
442 type(namespace_t), intent(in) :: namespace
443 class(space_t), intent(in) :: space
444 type(stencil_t), intent(in) :: stencil
445 type(multicomm_t), intent(in) :: mc
446 type(mesh_t), optional, intent(in) :: parent
447 logical, optional, intent(in) :: regenerate
448
449 integer :: ip
450
451 push_sub_with_profile(mesh_init_stage_3)
452
453 call mpi_grp_init(mesh%mpi_grp, mc%group_comm(p_strategy_domains))
454
455 ! check if we are running in parallel in domains
456 mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)
457
458 call checksum_calculate(1, mesh%np_part_global, mesh%idx%grid_to_spatial_global(1), &
459 mesh%idx%checksum)
460
461 if (mesh%parallel_in_domains) then
462 call do_partition()
463 else
464 ! When running serially, the local and global point numbers are the same.
465 assert(mesh%np_part_global < huge(mesh%np_part))
466 mesh%np = i8_to_i4(mesh%np_global)
467 mesh%np_part = i8_to_i4(mesh%np_part_global)
468
469 ! These must be initialized for par_vec_gather, par_vec_scatter to work
470 ! as copy operations when running without domain parallelization.
471 mesh%pv%np_global = mesh%np_global
472 mesh%pv%np_ghost = 0
473 mesh%pv%np_bndry = mesh%np_part - mesh%np
474 mesh%pv%npart = 1
475 mesh%pv%xlocal = 1
476 end if
477
478 ! Compute mesh%x
479 safe_allocate(mesh%x(1:mesh%np_part, 1:space%dim))
480 !$omp parallel do
481 do ip = 1, mesh%np_part
482 mesh%x(ip, 1:space%dim) = m_zero
483 end do
484
485 do ip = 1, mesh%np_part
486 mesh%x(ip, 1:space%dim) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
487 end do
488
489 call mesh_get_vol_pp()
490
491 pop_sub_with_profile(mesh_init_stage_3)
492
493 contains
494 ! ---------------------------------------------------------
495 subroutine do_partition()
496#ifdef HAVE_MPI
497 integer :: jj, ipart, jpart
498 integer(int64) :: ipg
499 integer, allocatable :: gindex(:), gedges(:)
500 logical, allocatable :: nb(:, :)
501 integer :: idx(space%dim), jx(space%dim)
502 type(mpi_comm) :: graph_comm
503 integer :: iedge, nnb
504 logical :: use_topo, reorder, partition_print
505 integer :: ierr
506
507 logical :: has_virtual_partition = .false.
508 integer :: vsize
509 type(restart_t) :: restart_load, restart_dump
510 integer, allocatable :: part_vec(:)
511
513
514 !Try to load the partition from the restart files
515 if (.not. optional_default(regenerate, .false.)) then
516 call restart_init(restart_load, namespace, restart_partition, restart_type_load, mc, ierr, mesh=mesh, exact=.true.)
517 if (ierr == 0) call mesh_partition_load(restart_load, mesh, ierr)
518 call restart_end(restart_load)
519 else
520 ierr = 0
521 end if
522
523 if (ierr /= 0) then
524
525 !%Variable MeshPartitionVirtualSize
526 !%Type integer
527 !%Default mesh mpi_grp size
528 !%Section Execution::Parallelization
529 !%Description
530!% Gives the possibility to generate the mesh partition for a different number of nodes.
531!% Note that the execution stops right after the partition has been computed and saved.
532 !%End
533 call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)
534
535 if (vsize /= mesh%mpi_grp%size) then
536 write(message(1),'(a,I7)') "Changing the partition size to", vsize
537 write(message(2),'(a)') "The execution will crash."
538 call messages_warning(2, namespace=namespace)
539 has_virtual_partition = .true.
540 else
541 has_virtual_partition = .false.
542 end if
543
544 if (.not. present(parent)) then
545 call mesh_partition(mesh, namespace, space, stencil, vsize)
546 else
547 ! if there is a parent grid, use its partition
548 call mesh_partition_from_parent(mesh, parent)
549 end if
550
551 !Now that we have the partitions, we save them
552 call restart_init(restart_dump, namespace, restart_partition, restart_type_dump, mc, ierr, mesh=mesh)
553 call mesh_partition_dump(restart_dump, mesh, vsize, ierr)
554 call restart_end(restart_dump)
555 end if
556
557 if (has_virtual_partition) then
558 call profiling_end(namespace)
559 call print_date("Calculation ended on ")
560 write(message(1),'(a)') "Execution has ended."
561 write(message(2),'(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
562 call messages_warning(2, namespace=namespace)
563 call messages_end()
564 call global_end()
565 stop
566 end if
567
568 !%Variable MeshUseTopology
569 !%Type logical
570 !%Default false
571 !%Section Execution::Parallelization
572 !%Description
573 !% (experimental) If enabled, <tt>Octopus</tt> will use an MPI virtual
574 !% topology to map the processors. This can improve performance
575 !% for certain interconnection systems.
576 !%End
577 call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)
578
579 if (use_topo) then
580 ! At the moment we still need the global partition. This will be removed in near future.
581 safe_allocate(part_vec(1:mesh%np_part_global))
582 call partition_get_global(mesh%partition, part_vec(1:mesh%np_global))
583
584
585 ! generate a table of neighbours
586
587 safe_allocate(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
588 nb = .false.
589
590 do ipg = 1, mesh%np_global
591 ipart = part_vec(ipg)
592 call mesh_global_index_to_coords(mesh, ipg, idx)
593 do jj = 1, stencil%size
594 jx = idx + stencil%points(:, jj)
595 if (all(jx >= mesh%idx%nr(1, :)) .and. all(jx <= mesh%idx%nr(2, :))) then
596 jpart = part_vec(mesh_global_index_from_coords(mesh, jx))
597 if (ipart /= jpart ) nb(ipart, jpart) = .true.
598 end if
599 end do
600 end do
601 safe_deallocate_a(part_vec)
602
603 ! now generate the information of the graph
604
605 safe_allocate(gindex(1:mesh%mpi_grp%size))
606 safe_allocate(gedges(1:count(nb)))
607
608 ! and now generate it
609 iedge = 0
610 do ipart = 1, mesh%mpi_grp%size
611 do jpart = 1, mesh%mpi_grp%size
612 if (nb(ipart, jpart)) then
613 iedge = iedge + 1
614 gedges(iedge) = jpart - 1
615 end if
616 end do
617 gindex(ipart) = iedge
618 end do
619
620 assert(iedge == count(nb))
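      ! gindex/gedges describe the neighbour graph in the format expected by
      ! MPI_Graph_create: gindex(ipart) holds the cumulative number of edges of
      ! nodes 1..ipart, and gedges lists the corresponding (zero-based) neighbour ranks.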
621
622 reorder = .true.
623 call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm, mpi_err)
624
625 ! we have a new communicator
626 call mpi_grp_init(mesh%mpi_grp, graph_comm)
627
628 safe_deallocate_a(nb)
629 safe_deallocate_a(gindex)
630 safe_deallocate_a(gedges)
631
632 end if
633
634 if (optional_default(regenerate, .false.)) call par_vec_end(mesh%pv)
635 call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil,&
636 space, mesh%partition, mesh%pv, namespace)
637
638 ! check the number of ghost neighbours in parallel
639 nnb = 0
640 jpart = mesh%pv%partno
641 do ipart = 1, mesh%pv%npart
642 if (ipart == jpart) cycle
643 if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
644 end do
645 assert(nnb >= 0 .and. nnb < mesh%pv%npart)
646
647 ! Set local point numbers.
648 mesh%np = mesh%pv%np_local
649 mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry
650
651 !%Variable PartitionPrint
652 !%Type logical
653 !%Default true
654 !%Section Execution::Parallelization
655 !%Description
656!% (experimental) If disabled, <tt>Octopus</tt> will neither compute
657!% nor print the partition information, such as the number of local points,
658!% neighbours, ghost points, and boundary points.
659 !%End
660 call parse_variable(namespace, 'PartitionPrint', .true., partition_print)
661
662 if (partition_print) then
663 call mesh_partition_write_info(mesh, namespace=namespace)
664 call mesh_partition_messages_debug(mesh, namespace)
665 end if
666#endif
667
669 end subroutine do_partition
670
671
672 ! ---------------------------------------------------------
!> Calculate the volume of integration associated with each mesh point.
674 subroutine mesh_get_vol_pp()
675
676 integer :: jj(space%dim), ip, np
677 real(real64) :: chi(space%dim)
678
680
681 np = 1
682 if (mesh%use_curvilinear) np = mesh%np_part
683 ! If there are no local points, we should not try to access the arrays
684 if (mesh%np_part == 0) np = 0
685
686 safe_allocate(mesh%vol_pp(1:np))
687
688 do ip = 1, np
689 mesh%vol_pp(ip) = product(mesh%spacing)
690 end do
691
692 do ip = 1, np
693 call mesh_local_index_to_coords(mesh, ip, jj)
694 chi = jj*mesh%spacing
695 mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%det_Jac(mesh%x(ip, :), chi)
696 end do
697
698 if (mesh%use_curvilinear .or. mesh%np_part == 0) then
699 mesh%volume_element = m_one
700 else
701 mesh%volume_element = mesh%vol_pp(1)
702 end if
703
705 end subroutine mesh_get_vol_pp
706
707 end subroutine mesh_init_stage_3
708
!> Re-distribute the points among the MPI tasks to improve load balancing.
713 subroutine rebalance_array(data_input, data_output, output_sizes)
714 integer(int64), contiguous, intent(in) :: data_input(:)
715 integer(int64), allocatable, intent(out) :: data_output(:)
716 integer, allocatable, optional, intent(out) :: output_sizes(:)
717
718 integer, allocatable :: initial_sizes(:), final_sizes(:)
719 integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
720 integer, allocatable :: scounts(:), sdispls(:)
721 integer, allocatable :: rcounts(:), rdispls(:)
722 integer :: irank
723 integer(int64) :: itmp
724
725 push_sub(rebalance_array)
726
727 ! collect current sizes of distributed array
728 safe_allocate(initial_sizes(0:mpi_world%size-1))
729 call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
730 safe_allocate(initial_offsets(0:mpi_world%size))
731 initial_offsets(0) = 0
732 do irank = 1, mpi_world%size
733 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
734 end do
735
736 ! now redistribute the arrays
737 ! use block data decomposition of grid indices
738 safe_allocate(final_offsets(0:mpi_world%size))
739 safe_allocate(final_sizes(0:mpi_world%size-1))
740
741 do irank = 0, mpi_world%size
742 final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
743 end do
744 do irank = 0, mpi_world%size - 1
745 assert(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
746 final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)
747 end do
748
749 safe_allocate(scounts(0:mpi_world%size-1))
750 safe_allocate(sdispls(0:mpi_world%size-1))
751 safe_allocate(rcounts(0:mpi_world%size-1))
752 safe_allocate(rdispls(0:mpi_world%size-1))
753 ! determine communication pattern
754 scounts = 0
755 do irank = 0, mpi_world%size - 1
756 ! get overlap of initial and final distribution
757 itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
758 max(final_offsets(irank), initial_offsets(mpi_world%rank))
759 assert(itmp < huge(0_int32))
760 if (itmp < 0) then
761 scounts(irank) = 0
762 else
763 scounts(irank) = int(itmp, int32)
764 end if
765 end do
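    ! Example: if this rank initially holds global positions [10, 20) and rank irank
    ! should finally hold [15, 30), the overlap is min(20, 30) - max(15, 10) = 5, i.e.
    ! 5 elements are sent to irank.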
766 sdispls(0) = 0
767 do irank = 1, mpi_world%size - 1
768 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
769 end do
770 assert(sum(int(scounts, int64)) < huge(0_int32))
771 assert(sum(scounts) == initial_sizes(mpi_world%rank))
772
773 rcounts = 0
774 do irank = 0, mpi_world%size - 1
775 ! get overlap of initial and final distribution
776 itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
777 max(final_offsets(mpi_world%rank), initial_offsets(irank))
778 assert(itmp < huge(0_int32))
779 if (itmp < 0) then
780 rcounts(irank) = 0
781 else
782 rcounts(irank) = int(itmp, int32)
783 end if
784 end do
785 rdispls(0) = 0
786 do irank = 1, mpi_world%size - 1
787 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
788 end do
789 ! check for consistency between sending and receiving
790 assert(sum(rcounts) == final_sizes(mpi_world%rank))
791
792 safe_allocate(data_output(1:final_sizes(mpi_world%rank)))
793 call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
794 data_output, rcounts, rdispls, mpi_integer8)
795
796 ! save final sizes of array if optional argument present
797 if (present(output_sizes)) then
798 safe_allocate(output_sizes(0:mpi_world%size-1))
799 output_sizes(:) = final_sizes(:)
800 end if
801
802 pop_sub(rebalance_array)
803 end subroutine rebalance_array
804
!> Reorder the points in the grid according to the variables MeshOrder and MeshLocalOrder.
809 subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
810 type(namespace_t), intent(in) :: namespace
811 class(space_t), intent(in) :: space
812 type(index_t), intent(in) :: idx
813 integer(int64), intent(in) :: grid_to_spatial(:)
814 integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)
815
816 integer :: bsize(space%dim), order, default
817 integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
818 type(block_t) :: blk
819 integer, parameter :: &
820 ORDER_BLOCKS = 1, &
821 order_original = 2, &
822 order_cube = 3
823 integer :: point(1:space%dim)
824 integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
825 integer, allocatable :: index_map(:), indices(:)
826 integer(int64), allocatable :: grid_to_spatial_recv(:)
827 integer, allocatable :: initial_sizes(:)
828 integer(int64), allocatable :: initial_offsets(:)
829 integer(int64) :: istart, iend, indstart, indend, spatial_size
830 integer :: irank, local_size, num_recv
831 integer :: iunique, nunique
832 integer :: direction
833 logical :: increase_with_dimension
834
835 integer, allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
836 integer(int64), allocatable :: spatial_cutoff(:)
837
838 push_sub(reorder_points)
839
840 !%Variable MeshOrder
841 !%Type integer
842 !%Section Execution::Optimization
843 !%Description
844 !% This variable controls how the grid points are mapped to a
845 !% linear array for global arrays. For runs that are parallel
846 !% in domains, the local mesh order may be different (see
847 !% <tt>MeshLocalOrder</tt>).
848 !% The default is blocks when serial in domains and cube when
849 !% parallel in domains with the local mesh order set to blocks.
850 !%Option order_blocks 1
851 !% The grid is mapped using small parallelepipedic grids. The size
852 !% of the blocks is controlled by <tt>MeshBlockSize</tt>.
853 !%Option order_original 2
854 !% The original order of the indices is used to map the grid.
855 !%Option order_cube 3
856 !% The grid is mapped using a full cube, i.e. without blocking.
857 !%End
858 default = order_blocks
859 call parse_variable(namespace, 'MeshOrder', default, order)
860 ! no reordering in 1D necessary
861 if (space%dim == 1) then
862 order = order_original
863 end if
864
865 !%Variable MeshBlockDirection
866 !%Type integer
867 !%Section Execution::Optimization
868 !%Description
869 !% Determines the direction in which the dimensions are chosen to compute
870 !% the blocked index for sorting the mesh points (see MeshBlockSize).
871!% The default is increase_with_dimension, corresponding to xyz ordering
872 !% in 3D.
873 !%Option increase_with_dimension 1
874 !% The fastest changing index is in the first dimension, i.e., in 3D this
875 !% corresponds to ordering in xyz directions.
876 !%Option decrease_with_dimension 2
877 !% The fastest changing index is in the last dimension, i.e., in 3D this
878 !% corresponds to ordering in zyx directions.
879 !%End
880 call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
881 increase_with_dimension = direction == 1
882 if (direction /= 1 .and. direction /= 2) then
883 call messages_input_error(namespace, 'MeshBlockDirection')
884 end if
885
886 select case (order)
887 case (order_original)
888 ! only copy points, they stay in their original ordering
889 safe_allocate(grid_to_spatial_reordered(1:size(grid_to_spatial)))
890 grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))
891 case (order_blocks, order_cube)
892 if (order == order_cube) then
893 bsize = idx%nr(2, :) - idx%nr(1, :) + 1
894 else
895 !%Variable MeshBlockSize
896 !%Type block
897 !%Section Execution::Optimization
898 !%Description
899 !% To improve memory-access locality when calculating derivatives,
900 !% <tt>Octopus</tt> arranges mesh points in blocks. This variable
901!% controls the size of these blocks in the different
902 !% directions. The default is selected according to the value of
903 !% the <tt>StatesBlockSize</tt> variable. (This variable only affects the
904 !% performance of <tt>Octopus</tt> and not the results.)
905 !%End
906 if (conf%target_states_block_size < 16) then
907 bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
908 if (space%dim > 1) bsize(2) = 4
909 if (space%dim > 2) bsize(3:) = 10
910 else
911 bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
912 if (space%dim > 1) bsize(2) = 15
913 if (space%dim > 2) bsize(3:) = 15
914 end if
915
916 if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
917 nn = parse_block_cols(blk, 0)
918 if (nn /= space%dim) then
919 message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."
920 call messages_fatal(1, namespace=namespace)
921 end if
922 do idir = 1, nn
923 call parse_block_integer(blk, 0, idir - 1, bsize(idir))
924 end do
925 end if
926 end if
927
928 number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1
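      ! the points are then ordered block by block: roughly speaking, all points of one
      ! bsize-sized parallelepiped come before the points of the next one, which improves
      ! memory-access locality for stencil operations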
929
930
931 ! do the global reordering in parallel, use block data decomposition of global indices
932 ! reorder indices along blocked parallelepiped curve
933
934 ! collect current sizes of distributed array
935 safe_allocate(initial_sizes(0:mpi_world%size-1))
936 call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
937 safe_allocate(initial_offsets(0:mpi_world%size))
938 initial_offsets(0) = 0
939 do irank = 1, mpi_world%size
940 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
941 end do
942
943 ! get local range and size
944 istart = initial_offsets(mpi_world%rank)
945 iend = initial_offsets(mpi_world%rank + 1) - 1
946 assert(iend - istart + 1 < huge(0_int32))
947 local_size = int(iend - istart + 1, int32)
948 assert(local_size == initial_sizes(mpi_world%rank))
949
950 ! compute new indices locally
951 safe_allocate(reorder_indices(1:local_size))
952 safe_allocate(indices(1:local_size))
953 safe_allocate(grid_to_spatial_reordered(1:local_size))
954 !$omp parallel do private(point)
955 do ip = 1, local_size
956 call index_spatial_to_point(idx, space%dim, grid_to_spatial(ip), point)
957 point = point + idx%offset
958 reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)
959 end do
960 ! parallel sort according to the new indices
961 ! sort the local array
962 call sort(reorder_indices, indices)
963 ! save reordered indices to send to other processes
964 !$omp parallel do
965 do ip = 1, local_size
966 grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))
967 end do
968
969 ! get minimum and maximum
970 indstart = reorder_indices(1)
971 indend = reorder_indices(local_size)
972 call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
973 call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
974 spatial_size = indend - indstart + 1
975
976 ! get index ranges for each rank
977 safe_allocate(spatial_cutoff(0:mpi_world%size-1))
978 do irank = 0, mpi_world%size - 1
979 spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart
980 end do
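      ! spatial_cutoff(irank) is the first blocked index that no longer belongs to rank
      ! irank; comparing the locally sorted indices against these cutoffs below yields
      ! the send counts for the parallel bucket distribution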
981
982 safe_allocate(scounts(0:mpi_world%size-1))
983 safe_allocate(sdispls(0:mpi_world%size-1))
984 safe_allocate(rcounts(0:mpi_world%size-1))
985 safe_allocate(rdispls(0:mpi_world%size-1))
986 ! get send counts
987 scounts = 0
988 irank = 0
989 ! the indices are ordered, so we can go through them and increase
990 ! the rank to which they are associated when we cross a cutoff
991 do ip = 1, local_size
992 if (reorder_indices(ip) >= spatial_cutoff(irank)) then
993 ! this do loop is needed in case some ranks do not have any points
994 do while (reorder_indices(ip) >= spatial_cutoff(irank))
995 irank = irank + 1
996 end do
997 assert(irank < mpi_world%size)
998 end if
999 scounts(irank) = scounts(irank) + 1
1000 end do
1001 safe_deallocate_a(spatial_cutoff)
1002 assert(sum(scounts) == local_size)
1003
1004 ! compute communication pattern (sdispls, rcounts, rdispls)
1005 sdispls(0) = 0
1006 do irank = 1, mpi_world%size - 1
1007 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
1008 end do
1009
1010 call mpi_world%alltoall(scounts, 1, mpi_integer, &
1011 rcounts, 1, mpi_integer)
1012
1013 rdispls(0) = 0
1014 do irank = 1, mpi_world%size - 1
1015 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
1016 end do
1017
1018 ! make sure the arrays get allocated also if we do not receive anything
1019 num_recv = max(sum(rcounts), 1)
1020 ! communicate the locally sorted indices
1021 safe_allocate(reorder_recv(1:num_recv))
1022 call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
1023 reorder_recv, rcounts, rdispls, mpi_integer8)
1024 safe_deallocate_a(reorder_indices)
1025
1026 ! communicate the corresponding spatial indices
1027 safe_allocate(grid_to_spatial_recv(1:num_recv))
1028 call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
1029 grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
1030 safe_deallocate_a(grid_to_spatial_reordered)
1031
1032 ! do k-way merge of sorted indices
1033 safe_allocate(reorder_indices(1:num_recv))
1034 safe_allocate(index_map(1:num_recv))
1035 if (sum(rcounts) > 0) then
1036 call merge_sorted_arrays(reorder_recv, rcounts, reorder_indices, index_map)
1037
1038 ! get number of unique indices, needed for boundary
1039 nunique = 1
1040 do ipg = 2, sum(rcounts)
1041 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1042 nunique = nunique + 1
1043 end if
1044 end do
1045
1046 ! reorder according to new order, but remove duplicate entries
1047 safe_allocate(grid_to_spatial_reordered(1:nunique))
1048 iunique = 1
1049 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
1050 do ipg = 2, sum(rcounts)
1051 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1052 iunique = iunique + 1
1053 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
1054 end if
1055 end do
1056 else
1057 safe_allocate(grid_to_spatial_reordered(1:0))
1058 end if
1059
1060 safe_deallocate_a(initial_offsets)
1061 safe_deallocate_a(initial_sizes)
1062
1063 safe_deallocate_a(reorder_indices)
1064 safe_deallocate_a(reorder_recv)
1065
1066 safe_deallocate_a(grid_to_spatial_recv)
1067 safe_deallocate_a(index_map)
1068 safe_deallocate_a(indices)
1069
1070 safe_deallocate_a(scounts)
1071 safe_deallocate_a(sdispls)
1072 safe_deallocate_a(rcounts)
1073 safe_deallocate_a(rdispls)
1074
1075 end select
1076 pop_sub(reorder_points)
1077 end subroutine reorder_points
1078
!> Return the sizes and offsets of a distributed array for all tasks of an MPI group.
1081 subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)
1082 integer, intent(in) :: local_size
1083 integer, allocatable, intent(out) :: sizes(:)
1084 integer(int64), allocatable, intent(out) :: offsets(:)
1085 type(mpi_grp_t), optional, intent(in) :: mpi_grp
1086
1087 integer :: irank
1088 type(mpi_grp_t) :: mpi_grp_
1089
1090 push_sub(get_sizes_offsets)
1091
1092 if (present(mpi_grp)) then
1093 mpi_grp_ = mpi_grp
1094 else
1095 mpi_grp_ = mpi_world
1096 end if
1097
1098 safe_allocate(sizes(0:mpi_grp_%size-1))
1099 call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)
1100 safe_allocate(offsets(0:mpi_grp_%size))
1101 offsets(0) = 0
1102 do irank = 1, mpi_grp_%size
1103 offsets(irank) = offsets(irank-1) + sizes(irank-1)
1104 end do
1105
1106 pop_sub(get_sizes_offsets)
1107 end subroutine get_sizes_offsets
1108
1109end module mesh_init_oct_m
1110
1111!! Local Variables:
1112!! mode: f90
1113!! coding: utf-8
1114!! End: