mesh_init.F90
!! Copyright (C) 2002-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
!! Copyright (C) 2021 S. Ohlmann
!!
!! This program is free software; you can redistribute it and/or modify
!! it under the terms of the GNU General Public License as published by
!! the Free Software Foundation; either version 2, or (at your option)
!! any later version.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU General Public License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with this program; if not, write to the Free Software
!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
!! 02110-1301, USA.
!!

#include "global.h"

module mesh_init_oct_m
  use box_oct_m
  use debug_oct_m
  use global_oct_m
  use iihash_oct_m
  use index_oct_m
  use math_oct_m
  use mesh_oct_m
  use mpi_oct_m
  use parser_oct_m
  use sort_oct_m
  use space_oct_m
  use utils_oct_m

  implicit none

  private
  public ::            &
    mesh_init_stage_1, &
    mesh_init_stage_2, &
    mesh_init_stage_3

  integer, parameter :: INNER_POINT = 1
  integer, parameter :: ENLARGEMENT_POINT = 2
  integer, parameter :: BOUNDARY = -1

contains

! ---------------------------------------------------------
  subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
    class(mesh_t),                      intent(inout) :: mesh
    type(namespace_t),                  intent(in)    :: namespace
    class(space_t),                     intent(in)    :: space
    class(box_t),               target, intent(in)    :: box
    class(coordinate_system_t), target, intent(in)    :: coord_system
    real(real64),                       intent(in)    :: spacing(1:space%dim)
    integer,                            intent(in)    :: enlarge(1:space%dim)

    integer :: idir, jj, delta
    real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
    logical :: out

    PUSH_SUB_WITH_PROFILE(mesh_init_stage_1)

    mesh%box => box

    SAFE_ALLOCATE(mesh%spacing(1:space%dim))
    mesh%spacing = spacing ! this number can change in the following

    mesh%use_curvilinear = coord_system%local_basis
    mesh%coord_system => coord_system

    call index_init(mesh%idx, space%dim)
    mesh%idx%enlarge = enlarge

    ! get box bounds along the axes that generate the grid points
    select type (coord_system)
    class is (affine_coordinates_t)
      box_bounds = box%bounds(coord_system%basis)
    class default
      box_bounds = box%bounds()
    end select

    ! adjust nr
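    ! For each direction we scan outwards from the origin: jj is increased until the
    ! point at jj*spacing falls outside the box (plus the small tolerance
    ! box_boundary_delta); the last point still inside defines the upper index bound
    ! nr(2, idir). For curvilinear coordinates the comparison is done in Cartesian space.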
    do idir = 1, space%dim
      chi = m_zero
      ! the upper border
      jj = 0
      out = .false.
      do while (.not. out)
        jj = jj + 1
        chi(idir) = real(jj, real64) * mesh%spacing(idir)
        if (mesh%use_curvilinear) then
          x = coord_system%to_cartesian(chi)
          out = x(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
        else
          ! do the same comparison here as in simul_box_contains_points
          out = chi(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
        end if
      end do
      mesh%idx%nr(2, idir) = jj - 1
    end do

    ! we have a symmetric mesh (for now)
    mesh%idx%nr(1, :) = -mesh%idx%nr(2, :)

    ! we have to adjust a couple of things for the periodic directions
    do idir = 1, space%periodic_dim
      if (mesh%idx%nr(2, idir) == 0) then
        ! this happens if Spacing > box size
        mesh%idx%nr(2, idir) = 1
        mesh%idx%nr(1, idir) = -1
      end if

      ! We have to adjust the spacing to be commensurate with the box,
      ! for this we scan the possible values of the grid size around the
      ! one we selected. We choose the size that has the spacing closest
      ! to the requested one.
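      ! Illustration with hypothetical numbers: for a periodic direction of half-length
      ! 5.0 and a requested spacing of 0.7, the scan above gives nr(2, idir) = 7. The
      ! candidate spacings are 10.0/16, 10.0/15 and 10.0/14; the last one (~0.714) is
      ! closest to 0.7, so delta = +1 is selected and one point is removed below,
      ! leaving 14 points per period.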
      do delta = -1, 1
        spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
        spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
      end do

      delta = minloc(spacing_new, dim = 1) - 2

      ASSERT(delta >= -1)
      ASSERT(delta <= 1)

      mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)

      ! we need to adjust the grid by adding or removing one point
      if (delta == -1) then
        mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
      else if (delta == 1) then
        mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
      end if

    end do

    if (any(abs(mesh%spacing - spacing) > 1.e-6_real64)) then
      call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
      call messages_warning()
    end if

    do idir = space%periodic_dim + 1, space%dim
      if (mesh%idx%nr(2, idir) == 0) then
        write(message(1),'(a,i2)') 'Spacing > box size in direction ', idir
        call messages_fatal(1, namespace=namespace)
      end if
    end do

    mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
    ! compute strides for cubic indices
    mesh%idx%stride(:) = 1
    do idir = 2, space%dim + 1
      mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
        (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
    end do
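    ! stride(idir) is the number of points one step in direction idir advances in the
    ! linear (cubic) index: the first dimension is the fastest-changing one, and each
    ! extent is enlarged by 2*enlarge(idir) to make room for boundary points.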

    POP_SUB_WITH_PROFILE(mesh_init_stage_1)
  end subroutine mesh_init_stage_1

  ! ---------------------------------------------------------
  !
  subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
    class(mesh_t),     intent(inout) :: mesh
    type(namespace_t), intent(in)    :: namespace
    class(space_t),    intent(in)    :: space
    class(box_t),      intent(in)    :: box
    type(stencil_t),   intent(in)    :: stencil
    logical, optional, intent(in)    :: regenerate

    integer :: is
    real(real64) :: chi(1:space%dim)
    real(real64) :: pos(space%dim)
    integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
    integer(int64) :: global_size
    integer(int32) :: local_size
    integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
    integer :: ip, ib, ib2, np, np_boundary, ii
    logical :: found
    type(lihash_t) :: spatial_to_boundary
    integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
    integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
    integer(int64), allocatable :: spatial_to_grid(:)
    integer, allocatable :: sizes(:)
    integer(int64), allocatable :: offsets(:)
    integer :: size_boundary
#ifdef HAVE_MPI
    integer(int64), pointer :: ptr(:)
    type(mpi_grp_t) :: internode_grp, intranode_grp
#endif

    PUSH_SUB_WITH_PROFILE(mesh_init_stage_2)

    if (.not. optional_default(regenerate, .false.)) then
      ! enlarge mesh for boundary points
      mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
      mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
    end if

    !%Variable MeshIndexType
    !%Type integer
    !%Default idx_cubic
    !%Section Mesh
    !%Description
    !% Determine index type. Must be the same for restarting a calculation.
    !%Option idx_cubic 1
    !% Cubic indices are used to map the spatial information to the grid points.
    !%Option idx_hilbert 2
    !% A Hilbert space-filling curve is used to map the spatial information to
    !% the grid points.
    !%End
    call parse_variable(namespace, 'MeshIndexType', idx_cubic, mesh%idx%type)

    grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
    mesh%idx%offset = grid_sizes/2
    if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
      write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
        " points in one direction for ", space%dim, " dimensions. This is not supported."
      call messages_fatal(1, namespace=namespace)
    end if
    global_size = product(int(grid_sizes, int64))
    ! compute the bits per dimension: grid_sizes(i) <= 2**bits
    mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64))/log(2.)))

    if (mesh%idx%type == idx_cubic) then
      spatial_size = global_size
    else if (mesh%idx%type == idx_hilbert) then
      spatial_size = 2**(space%dim*mesh%idx%bits)
    end if
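    ! For cubic indices the spatial index space is exactly the bounding cube, so its
    ! size equals the product of the grid extents. For Hilbert indices the curve covers
    ! a cube of side 2**bits, hence the larger 2**(dim*bits) index space.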

    ! use block data decomposition of spatial indices
    istart = spatial_size * mpi_world%rank/mpi_world%size
    iend = spatial_size * (mpi_world%rank+1)/mpi_world%size - 1
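    ! Each rank handles a nearly equal, contiguous slice of [0, spatial_size).
    ! Illustration with hypothetical numbers: spatial_size = 10 on 4 ranks gives the
    ! local ranges [0,1], [2,4], [5,6] and [7,9].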
    if (.not. (iend - istart + 1 < huge(0_int32))) then
      write(message(1), '(A, I10, A, I2, A)') "Error: local grid too large, more than ", &
        huge(0_int32), " points. This is not supported. Maybe use more MPI ranks?"
      call messages_fatal(1, namespace=namespace)
    end if
    local_size = int(iend - istart + 1, int32)

    SAFE_ALLOCATE(grid_to_spatial_initial(1:local_size))

    ! get inner grid indices
    ip = 1
    do ispatial = istart, iend
      call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
      ! first check if point is outside bounding box
      if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
      if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle
      ! then check if point is inside simulation box
      chi = real(point, real64) * mesh%spacing
      pos = mesh%coord_system%to_cartesian(chi)
      if (.not. box%contains_point(pos)) cycle
      grid_to_spatial_initial(ip) = ispatial
      ASSERT(ip + 1 < huge(ip))
      ip = ip + 1
    end do
    np = ip - 1

    call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
    np = sizes(mpi_world%rank)

    SAFE_DEALLOCATE_A(grid_to_spatial_initial)

    SAFE_ALLOCATE(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
    SAFE_DEALLOCATE_A(sizes)

    !$omp parallel do
    do ispatial = grid_to_spatial(1), grid_to_spatial(np)
      spatial_to_grid(ispatial) = -1
    end do
    !$omp parallel do
    do ip = 1, np
      spatial_to_grid(grid_to_spatial(ip)) = ip
    end do

    ! get local boundary indices
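    ! For every local inner point, apply the stencil; a neighbour that is neither an
    ! inner grid point nor inside the simulation box must be a boundary point. The hash
    ! table spatial_to_boundary makes sure each boundary point is stored only once.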
    call lihash_init(spatial_to_boundary)
    size_boundary = np
    SAFE_ALLOCATE(boundary_to_spatial(1:size_boundary))
    ib = 1
    do ip = 1, np
      call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
      do is = 1, stencil%size
        if (stencil%center == is) cycle
        point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)
        ! check if point is in inner part
        call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
        ASSERT(ispatialb >= 0)
        if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
          ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
          if (spatial_to_grid(ispatialb) > 0) cycle
        end if
        ! then check if point is inside simulation box
        chi = real(point_stencil, real64) * mesh%spacing
        pos = mesh%coord_system%to_cartesian(chi)
        if (box%contains_point(pos)) cycle
        ! it has to be a boundary point now
        ! check if already counted
        ib2 = lihash_lookup(spatial_to_boundary, ispatialb, found)
        if (found) cycle
        boundary_to_spatial(ib) = ispatialb
        call lihash_insert(spatial_to_boundary, ispatialb, ib)
        ib = ib + 1
        ! enlarge array
        if (ib >= size_boundary) then
          size_boundary = size_boundary * 2
          call make_array_larger(boundary_to_spatial, size_boundary)
        end if
      end do
    end do
    np_boundary = ib - 1
    call lihash_end(spatial_to_boundary)
    SAFE_DEALLOCATE_A(spatial_to_grid)

    ! reorder inner points
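    ! (the points are brought into the order selected by MeshOrder, see reorder_points
    ! below, and then rebalanced again so that every rank holds a contiguous chunk of
    ! the reordered list)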
    call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
    SAFE_DEALLOCATE_A(grid_to_spatial)

    call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
    np = sizes(mpi_world%rank)
    mesh%np_global = sizes(0)
    do ii = 1, mpi_world%size - 1
      mesh%np_global = mesh%np_global + sizes(ii)
    end do
    SAFE_DEALLOCATE_A(sizes)
    SAFE_DEALLOCATE_A(grid_to_spatial_reordered)

    ! reorder boundary points
    call make_array_larger(boundary_to_spatial, np_boundary)
    call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
    SAFE_DEALLOCATE_A(boundary_to_spatial)

    call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
    SAFE_DEALLOCATE_A(boundary_to_spatial_reordered)

    ! global grid size
    np_boundary = sizes(mpi_world%rank)
    mesh%np_part_global = mesh%np_global + sizes(0)
    do ii = 1, mpi_world%size - 1
      mesh%np_part_global = mesh%np_part_global + sizes(ii)
    end do
    SAFE_DEALLOCATE_A(sizes)


    ! get global indices
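    ! The global mapping grid_to_spatial_global is gathered on rank 0 and stored in an
    ! MPI shared-memory window, so only one copy per node is kept; it is broadcast to
    ! the other nodes further down.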
#ifdef HAVE_MPI
    ! create shared memory window and fill it only on root
    call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
    call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
      mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
#else
    SAFE_ALLOCATE(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
#endif
    ! inner grid
    call get_sizes_offsets(np, sizes, offsets)
    call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
      mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)

    ! boundary indices
    call get_sizes_offsets(np_boundary, sizes, offsets)
    call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
      mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)

    ! fill global hash map
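    ! spatial_to_grid_global is the inverse of grid_to_spatial_global: it maps a spatial
    ! index to the corresponding global grid point, with 0 marking spatial indices that
    ! are not part of the mesh.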
#ifdef HAVE_MPI
    ! create shared memory window and fill it only on root
    call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
      mesh%idx%window_spatial_to_grid, ptr)
    mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
#else
    SAFE_ALLOCATE(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
#endif
    if (mpi_grp_is_root(mpi_world)) then
      ! fill only on root, then broadcast
      !$omp parallel do
      do ispatial = 0, spatial_size-1
        mesh%idx%spatial_to_grid_global(ispatial) = 0
      end do
      !$omp parallel do
      do ipg = 1, mesh%np_part_global
        mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
      end do
    end if

#ifdef HAVE_MPI
    ! now broadcast the global arrays to local rank 0 on each node
    if (intranode_grp%rank == 0) then
      call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
      call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
    end if
    call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
    call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
#endif

    SAFE_DEALLOCATE_A(offsets)
    SAFE_DEALLOCATE_A(sizes)

    SAFE_DEALLOCATE_A(boundary_to_spatial)
    SAFE_DEALLOCATE_A(grid_to_spatial)

    POP_SUB_WITH_PROFILE(mesh_init_stage_2)
  end subroutine mesh_init_stage_2

! ---------------------------------------------------------
! ---------------------------------------------------------
  subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
    class(mesh_t),          intent(inout) :: mesh
    type(namespace_t),      intent(in)    :: namespace
    class(space_t),         intent(in)    :: space
    type(stencil_t),        intent(in)    :: stencil
    type(multicomm_t),      intent(in)    :: mc
    type(mesh_t), optional, intent(in)    :: parent
    logical,      optional, intent(in)    :: regenerate

    integer :: ip

    PUSH_SUB_WITH_PROFILE(mesh_init_stage_3)

    call mpi_grp_init(mesh%mpi_grp, mc%group_comm(p_strategy_domains))

    ! check if we are running in parallel in domains
    mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)

    call checksum_calculate(1, mesh%np_part_global, mesh%idx%grid_to_spatial_global(1), &
      mesh%idx%checksum)

    if (mesh%parallel_in_domains) then
      call do_partition()
    else
      ! When running serially those two are the same.
      ASSERT(mesh%np_part_global < huge(mesh%np_part))
      mesh%np = i8_to_i4(mesh%np_global)
      mesh%np_part = i8_to_i4(mesh%np_part_global)

      ! These must be initialized for par_vec_gather, par_vec_scatter to work
      ! as copy operations when running without domain parallelization.
      mesh%pv%np_global = mesh%np_global
      mesh%pv%np_ghost = 0
      mesh%pv%np_bndry = mesh%np_part - mesh%np
      mesh%pv%npart = 1
      mesh%pv%xlocal = 1
    end if

    ! Compute mesh%x
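    ! mesh%x(ip, :) stores the Cartesian coordinates of local point ip, including ghost
    ! and boundary points; they are obtained from the global index of each local point.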
    SAFE_ALLOCATE(mesh%x(1:mesh%np_part, 1:space%dim))
    !$omp parallel do
    do ip = 1, mesh%np_part
      mesh%x(ip, 1:space%dim) = m_zero
    end do

    do ip = 1, mesh%np_part
      mesh%x(ip, 1:space%dim) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
    end do

    call mesh_get_vol_pp()

    POP_SUB_WITH_PROFILE(mesh_init_stage_3)

  contains
    ! ---------------------------------------------------------
    subroutine do_partition()
#ifdef HAVE_MPI
      integer :: jj, ipart, jpart
      integer(int64) :: ipg
      integer, allocatable :: gindex(:), gedges(:)
      logical, allocatable :: nb(:, :)
      integer :: idx(space%dim), jx(space%dim)
      type(mpi_comm) :: graph_comm
      integer :: iedge, nnb
      logical :: use_topo, reorder, partition_print
      integer :: ierr

      logical :: has_virtual_partition = .false.
      integer :: vsize
      type(restart_t) :: restart_load, restart_dump
      integer, allocatable :: part_vec(:)


      !Try to load the partition from the restart files
      if (.not. optional_default(regenerate, .false.)) then
        call restart_init(restart_load, namespace, restart_partition, restart_type_load, mc, ierr, mesh=mesh, exact=.true.)
        if (ierr == 0) call mesh_partition_load(restart_load, mesh, ierr)
        call restart_end(restart_load)
      else
        ierr = 0
      end if

      if (ierr /= 0) then

        !%Variable MeshPartitionVirtualSize
        !%Type integer
        !%Default mesh mpi_grp size
        !%Section Execution::Parallelization
        !%Description
        !% Gives the possibility to change the partition nodes.
        !% Afterward, it crashes.
        !%End
        call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)

        if (vsize /= mesh%mpi_grp%size) then
          write(message(1),'(a,I7)') "Changing the partition size to", vsize
          write(message(2),'(a)') "The execution will crash."
          call messages_warning(2, namespace=namespace)
          has_virtual_partition = .true.
        else
          has_virtual_partition = .false.
        end if

        if (.not. present(parent)) then
          call mesh_partition(mesh, namespace, space, stencil, vsize)
        else
          ! if there is a parent grid, use its partition
          call mesh_partition_from_parent(mesh, parent)
        end if

        !Now that we have the partitions, we save them
        call restart_init(restart_dump, namespace, restart_partition, restart_type_dump, mc, ierr, mesh=mesh)
        call mesh_partition_dump(restart_dump, mesh, vsize, ierr)
        call restart_end(restart_dump)
      end if

      if (has_virtual_partition) then
        call profiling_end(namespace)
        call print_date("Calculation ended on ")
        write(message(1),'(a)') "Execution has ended."
        write(message(2),'(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
        call messages_warning(2, namespace=namespace)
        call messages_end()
        call global_end()
        stop
      end if

      !%Variable MeshUseTopology
      !%Type logical
      !%Default false
      !%Section Execution::Parallelization
      !%Description
      !% (experimental) If enabled, <tt>Octopus</tt> will use an MPI virtual
      !% topology to map the processors. This can improve performance
      !% for certain interconnection systems.
      !%End
      call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)

      if (use_topo) then
        ! At the moment we still need the global partition. This will be removed in near future.
        SAFE_ALLOCATE(part_vec(1:mesh%np_part_global))
        call partition_get_global(mesh%partition, part_vec(1:mesh%np_global))


        ! generate a table of neighbours
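        ! nb(ipart, jpart) is set to true when partition ipart owns a point whose
        ! stencil reaches into partition jpart; these pairs become the edges of the
        ! MPI graph topology created below.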

        SAFE_ALLOCATE(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
        nb = .false.

        do ipg = 1, mesh%np_global
          ipart = part_vec(ipg)
          call mesh_global_index_to_coords(mesh, ipg, idx)
          do jj = 1, stencil%size
            jx = idx + stencil%points(:, jj)
            if (all(jx >= mesh%idx%nr(1, :)) .and. all(jx <= mesh%idx%nr(2, :))) then
              jpart = part_vec(mesh_global_index_from_coords(mesh, jx))
              if (ipart /= jpart) nb(ipart, jpart) = .true.
            end if
          end do
        end do
        SAFE_DEALLOCATE_A(part_vec)

        ! now generate the information of the graph

        SAFE_ALLOCATE(gindex(1:mesh%mpi_grp%size))
        SAFE_ALLOCATE(gedges(1:count(nb)))

        ! and now generate it
        iedge = 0
        do ipart = 1, mesh%mpi_grp%size
          do jpart = 1, mesh%mpi_grp%size
            if (nb(ipart, jpart)) then
              iedge = iedge + 1
              gedges(iedge) = jpart - 1
            end if
          end do
          gindex(ipart) = iedge
        end do

        ASSERT(iedge == count(nb))

        reorder = .true.
        call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm, mpi_err)

        ! we have a new communicator
        call mpi_grp_init(mesh%mpi_grp, graph_comm)

        SAFE_DEALLOCATE_A(nb)
        SAFE_DEALLOCATE_A(gindex)
        SAFE_DEALLOCATE_A(gedges)

      end if

      if (optional_default(regenerate, .false.)) call par_vec_end(mesh%pv)
      call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil, &
        space, mesh%partition, mesh%pv, namespace)

      ! check the number of ghost neighbours in parallel
      nnb = 0
      jpart = mesh%pv%partno
      do ipart = 1, mesh%pv%npart
        if (ipart == jpart) cycle
        if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
      end do
      ASSERT(nnb >= 0 .and. nnb < mesh%pv%npart)

      ! Set local point numbers.
      mesh%np = mesh%pv%np_local
      mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry

      !%Variable PartitionPrint
      !%Type logical
      !%Default true
      !%Section Execution::Parallelization
      !%Description
      !% (experimental) If disabled, <tt>Octopus</tt> will not compute
      !% nor print the partition information, such as local points,
      !% no. of neighbours, ghost points and boundary points.
      !%End
      call parse_variable(namespace, 'PartitionPrint', .true., partition_print)

      if (partition_print) then
        call mesh_partition_write_info(mesh, namespace=namespace)
        call mesh_partition_messages_debug(mesh, namespace)
      end if
#endif

    end subroutine do_partition


    ! ---------------------------------------------------------
    subroutine mesh_get_vol_pp()

      integer :: jj(space%dim), ip, np
      real(real64) :: chi(space%dim)


      np = 1
      if (mesh%use_curvilinear) np = mesh%np_part
      ! If no local point, we should not try to access the arrays
      if (mesh%np_part == 0) np = 0

      SAFE_ALLOCATE(mesh%vol_pp(1:np))

      do ip = 1, np
        mesh%vol_pp(ip) = product(mesh%spacing)
      end do

      do ip = 1, np
        call mesh_local_index_to_coords(mesh, ip, jj)
        chi = jj*mesh%spacing
        mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%det_Jac(mesh%x(ip, :), chi)
      end do

      if (mesh%use_curvilinear .or. mesh%np_part == 0) then
        mesh%volume_element = m_one
      else
        mesh%volume_element = mesh%vol_pp(1)
      end if

    end subroutine mesh_get_vol_pp

  end subroutine mesh_init_stage_3

  subroutine rebalance_array(data_input, data_output, output_sizes)
    integer(int64), contiguous,     intent(in)  :: data_input(:)
    integer(int64), allocatable,    intent(out) :: data_output(:)
    integer, allocatable, optional, intent(out) :: output_sizes(:)

    integer, allocatable :: initial_sizes(:), final_sizes(:)
    integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
    integer, allocatable :: scounts(:), sdispls(:)
    integer, allocatable :: rcounts(:), rdispls(:)
    integer :: irank
    integer(int64) :: itmp

    PUSH_SUB(rebalance_array)

    ! collect current sizes of distributed array
    SAFE_ALLOCATE(initial_sizes(0:mpi_world%size-1))
    call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
    SAFE_ALLOCATE(initial_offsets(0:mpi_world%size))
    initial_offsets(0) = 0
    do irank = 1, mpi_world%size
      initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
    end do

    ! now redistribute the arrays
    ! use block data decomposition of grid indices
    SAFE_ALLOCATE(final_offsets(0:mpi_world%size))
    SAFE_ALLOCATE(final_sizes(0:mpi_world%size-1))

    do irank = 0, mpi_world%size
      final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
    end do
    do irank = 0, mpi_world%size - 1
      ASSERT(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
      final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)
    end do

    SAFE_ALLOCATE(scounts(0:mpi_world%size-1))
    SAFE_ALLOCATE(sdispls(0:mpi_world%size-1))
    SAFE_ALLOCATE(rcounts(0:mpi_world%size-1))
    SAFE_ALLOCATE(rdispls(0:mpi_world%size-1))
    ! determine communication pattern
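    ! scounts(irank) is the overlap between the block this rank currently owns,
    ! [initial_offsets(rank), initial_offsets(rank+1)), and the block rank irank should
    ! own in the final distribution, [final_offsets(irank), final_offsets(irank+1)).
    ! Because both distributions are ordered, the overlap is a single interval and its
    ! length is min(upper bounds) - max(lower bounds), clipped at zero.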
    scounts = 0
    do irank = 0, mpi_world%size - 1
      ! get overlap of initial and final distribution
      itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
        max(final_offsets(irank), initial_offsets(mpi_world%rank))
      ASSERT(itmp < huge(0_int32))
      if (itmp < 0) then
        scounts(irank) = 0
      else
        scounts(irank) = int(itmp, int32)
      end if
    end do
    sdispls(0) = 0
    do irank = 1, mpi_world%size - 1
      sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
    end do
    ASSERT(sum(int(scounts, int64)) < huge(0_int32))
    ASSERT(sum(scounts) == initial_sizes(mpi_world%rank))

    rcounts = 0
    do irank = 0, mpi_world%size - 1
      ! get overlap of initial and final distribution
      itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
        max(final_offsets(mpi_world%rank), initial_offsets(irank))
      ASSERT(itmp < huge(0_int32))
      if (itmp < 0) then
        rcounts(irank) = 0
      else
        rcounts(irank) = int(itmp, int32)
      end if
    end do
    rdispls(0) = 0
    do irank = 1, mpi_world%size - 1
      rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
    end do
    ! check for consistency between sending and receiving
    ASSERT(sum(rcounts) == final_sizes(mpi_world%rank))

    SAFE_ALLOCATE(data_output(1:final_sizes(mpi_world%rank)))
    call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
      data_output, rcounts, rdispls, mpi_integer8)

    ! save final sizes of array if optional argument present
    if (present(output_sizes)) then
      SAFE_ALLOCATE(output_sizes(0:mpi_world%size-1))
      output_sizes(:) = final_sizes(:)
    end if

    POP_SUB(rebalance_array)
  end subroutine rebalance_array

  subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
    type(namespace_t),           intent(in)  :: namespace
    class(space_t),              intent(in)  :: space
    type(index_t),               intent(in)  :: idx
    integer(int64),              intent(in)  :: grid_to_spatial(:)
    integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)

    integer :: bsize(space%dim), order, default
    integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
    type(block_t) :: blk
    integer, parameter ::   &
      ORDER_BLOCKS = 1,     &
      ORDER_ORIGINAL = 2,   &
      ORDER_CUBE = 3
    integer :: point(1:space%dim)
    integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
    integer, allocatable :: index_map(:), indices(:)
    integer(int64), allocatable :: grid_to_spatial_recv(:)
    integer, allocatable :: initial_sizes(:)
    integer(int64), allocatable :: initial_offsets(:)
    integer(int64) :: istart, iend, indstart, indend, spatial_size
    integer :: irank, local_size, num_recv
    integer :: iunique, nunique
    integer :: direction
    logical :: increase_with_dimension

    integer, allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
    integer(int64), allocatable :: spatial_cutoff(:)

    PUSH_SUB(reorder_points)

    !%Variable MeshOrder
    !%Type integer
    !%Section Execution::Optimization
    !%Description
    !% This variable controls how the grid points are mapped to a
    !% linear array for global arrays. For runs that are parallel
    !% in domains, the local mesh order may be different (see
    !% <tt>MeshLocalOrder</tt>).
    !% The default is blocks when serial in domains and cube when
    !% parallel in domains with the local mesh order set to blocks.
    !%Option order_blocks 1
    !% The grid is mapped using small parallelepipedic grids. The size
    !% of the blocks is controlled by <tt>MeshBlockSize</tt>.
    !%Option order_original 2
    !% The original order of the indices is used to map the grid.
    !%Option order_cube 3
    !% The grid is mapped using a full cube, i.e. without blocking.
    !%End
    default = ORDER_BLOCKS
    call parse_variable(namespace, 'MeshOrder', default, order)
    ! no reordering in 1D necessary
    if (space%dim == 1) then
      order = ORDER_ORIGINAL
    end if

    !%Variable MeshBlockDirection
    !%Type integer
    !%Section Execution::Optimization
    !%Description
    !% Determines the direction in which the dimensions are chosen to compute
    !% the blocked index for sorting the mesh points (see MeshBlockSize).
    !% The default is increase_with_dimensions, corresponding to xyz ordering
    !% in 3D.
    !%Option increase_with_dimension 1
    !% The fastest changing index is in the first dimension, i.e., in 3D this
    !% corresponds to ordering in xyz directions.
    !%Option decrease_with_dimension 2
    !% The fastest changing index is in the last dimension, i.e., in 3D this
    !% corresponds to ordering in zyx directions.
    !%End
    call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
    increase_with_dimension = direction == 1
    if (direction /= 1 .and. direction /= 2) then
      call messages_input_error(namespace, 'MeshBlockDirection')
    end if

    select case (order)
    case (ORDER_ORIGINAL)
      ! only copy points, they stay in their original ordering
      SAFE_ALLOCATE(grid_to_spatial_reordered(1:size(grid_to_spatial)))
      grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))
    case (ORDER_BLOCKS, ORDER_CUBE)
      if (order == ORDER_CUBE) then
        bsize = idx%nr(2, :) - idx%nr(1, :) + 1
      else
        !%Variable MeshBlockSize
        !%Type block
        !%Section Execution::Optimization
        !%Description
        !% To improve memory-access locality when calculating derivatives,
        !% <tt>Octopus</tt> arranges mesh points in blocks. This variable
        !% controls the size of these blocks in the different
        !% directions. The default is selected according to the value of
        !% the <tt>StatesBlockSize</tt> variable. (This variable only affects the
        !% performance of <tt>Octopus</tt> and not the results.)
        !%End
        if (conf%target_states_block_size < 16) then
          bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
          if (space%dim > 1) bsize(2) = 4
          if (space%dim > 2) bsize(3:) = 10
        else
          bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
          if (space%dim > 1) bsize(2) = 15
          if (space%dim > 2) bsize(3:) = 15
        end if

        if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
          nn = parse_block_cols(blk, 0)
          if (nn /= space%dim) then
            message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."
            call messages_fatal(1, namespace=namespace)
          end if
          do idir = 1, nn
            call parse_block_integer(blk, 0, idir - 1, bsize(idir))
          end do
        end if
      end if

      number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1
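      ! A block contains bsize(1) x bsize(2) x ... consecutive points; number_of_blocks
      ! counts how many such blocks are needed to cover the index range in each direction.
      ! get_blocked_index (used below) then orders the points block by block, which keeps
      ! points that are close in space close in memory.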


      ! do the global reordering in parallel, use block data decomposition of global indices
      ! reorder indices along blocked parallelepiped curve

      ! collect current sizes of distributed array
      SAFE_ALLOCATE(initial_sizes(0:mpi_world%size-1))
      call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
      SAFE_ALLOCATE(initial_offsets(0:mpi_world%size))
      initial_offsets(0) = 0
      do irank = 1, mpi_world%size
        initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
      end do

      ! get local range and size
      istart = initial_offsets(mpi_world%rank)
      iend = initial_offsets(mpi_world%rank + 1) - 1
      ASSERT(iend - istart + 1 < huge(0_int32))
      local_size = int(iend - istart + 1, int32)
      ASSERT(local_size == initial_sizes(mpi_world%rank))

      ! compute new indices locally
      SAFE_ALLOCATE(reorder_indices(1:local_size))
      SAFE_ALLOCATE(indices(1:local_size))
      SAFE_ALLOCATE(grid_to_spatial_reordered(1:local_size))
      !$omp parallel do private(point)
      do ip = 1, local_size
        call index_spatial_to_point(idx, space%dim, grid_to_spatial(ip), point)
        point = point + idx%offset
        reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)
      end do
      ! parallel sort according to the new indices
      ! sort the local array
      call sort(reorder_indices, indices)
      ! save reordered indices to send to other processes
      !$omp parallel do
      do ip = 1, local_size
        grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))
      end do

      ! get minimum and maximum
      indstart = reorder_indices(1)
      indend = reorder_indices(local_size)
      call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
      call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
      spatial_size = indend - indstart + 1

      ! get index ranges for each rank
      SAFE_ALLOCATE(spatial_cutoff(0:mpi_world%size-1))
      do irank = 0, mpi_world%size - 1
        spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart
      end do

      SAFE_ALLOCATE(scounts(0:mpi_world%size-1))
      SAFE_ALLOCATE(sdispls(0:mpi_world%size-1))
      SAFE_ALLOCATE(rcounts(0:mpi_world%size-1))
      SAFE_ALLOCATE(rdispls(0:mpi_world%size-1))
      ! get send counts
      scounts = 0
      irank = 0
      ! the indices are ordered, so we can go through them and increase
      ! the rank to which they are associated to when we cross a cutoff
      do ip = 1, local_size
        if (reorder_indices(ip) >= spatial_cutoff(irank)) then
          ! this do loop is needed in case some ranks do not have any points
          do while (reorder_indices(ip) >= spatial_cutoff(irank))
            irank = irank + 1
          end do
          ASSERT(irank < mpi_world%size)
        end if
        scounts(irank) = scounts(irank) + 1
      end do
      SAFE_DEALLOCATE_A(spatial_cutoff)
      ASSERT(sum(scounts) == local_size)

      ! compute communication pattern (sdispls, rcounts, rdispls)
      sdispls(0) = 0
      do irank = 1, mpi_world%size - 1
        sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
      end do

      call mpi_world%alltoall(scounts, 1, mpi_integer, &
        rcounts, 1, mpi_integer)

      rdispls(0) = 0
      do irank = 1, mpi_world%size - 1
        rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
      end do

      ! make sure the arrays get allocated also if we do not receive anything
      num_recv = max(sum(rcounts), 1)
      ! communicate the locally sorted indices
      SAFE_ALLOCATE(reorder_recv(1:num_recv))
      call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
        reorder_recv, rcounts, rdispls, mpi_integer8)
      SAFE_DEALLOCATE_A(reorder_indices)

      ! communicate the corresponding spatial indices
      SAFE_ALLOCATE(grid_to_spatial_recv(1:num_recv))
      call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
        grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
      SAFE_DEALLOCATE_A(grid_to_spatial_reordered)

      ! do k-way merge of sorted indices
      SAFE_ALLOCATE(reorder_indices(1:num_recv))
      SAFE_ALLOCATE(index_map(1:num_recv))
      if (sum(rcounts) > 0) then
        call merge_sorted_arrays(reorder_recv, rcounts, reorder_indices, index_map)

        ! get number of unique indices, needed for boundary
        nunique = 1
        do ipg = 2, sum(rcounts)
          if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
            nunique = nunique + 1
          end if
        end do

        ! reorder according to new order, but remove duplicate entries
        SAFE_ALLOCATE(grid_to_spatial_reordered(1:nunique))
        iunique = 1
        grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
        do ipg = 2, sum(rcounts)
          if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
            iunique = iunique + 1
            grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
          end if
        end do
      else
        SAFE_ALLOCATE(grid_to_spatial_reordered(1:0))
      end if

      SAFE_DEALLOCATE_A(initial_offsets)
      SAFE_DEALLOCATE_A(initial_sizes)

      SAFE_DEALLOCATE_A(reorder_indices)
      SAFE_DEALLOCATE_A(reorder_recv)

      SAFE_DEALLOCATE_A(grid_to_spatial_recv)
      SAFE_DEALLOCATE_A(index_map)
      SAFE_DEALLOCATE_A(indices)

      SAFE_DEALLOCATE_A(scounts)
      SAFE_DEALLOCATE_A(sdispls)
      SAFE_DEALLOCATE_A(rcounts)
      SAFE_DEALLOCATE_A(rdispls)

    end select
    POP_SUB(reorder_points)
  end subroutine reorder_points

  subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)
    integer,                     intent(in)  :: local_size
    integer, allocatable,        intent(out) :: sizes(:)
    integer(int64), allocatable, intent(out) :: offsets(:)
    type(mpi_grp_t), optional,   intent(in)  :: mpi_grp

    integer :: irank
    type(mpi_grp_t) :: mpi_grp_

    PUSH_SUB(get_sizes_offsets)

    if (present(mpi_grp)) then
      mpi_grp_ = mpi_grp
    else
      mpi_grp_ = mpi_world
    end if

    SAFE_ALLOCATE(sizes(0:mpi_grp_%size-1))
    call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)
    SAFE_ALLOCATE(offsets(0:mpi_grp_%size))
    offsets(0) = 0
    do irank = 1, mpi_grp_%size
      offsets(irank) = offsets(irank-1) + sizes(irank-1)
    end do

    POP_SUB(get_sizes_offsets)
  end subroutine get_sizes_offsets

end module mesh_init_oct_m

!! Local Variables:
!! mode: f90
!! coding: utf-8
!! End: