Octopus
mesh_init.F90
1!! Copyright (C) 2002-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
2!! Copyright (C) 2021 S. Ohlmann
3!!
4!! This program is free software; you can redistribute it and/or modify
5!! it under the terms of the GNU General Public License as published by
6!! the Free Software Foundation; either version 2, or (at your option)
7!! any later version.
8!!
9!! This program is distributed in the hope that it will be useful,
10!! but WITHOUT ANY WARRANTY; without even the implied warranty of
11!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12!! GNU General Public License for more details.
13!!
14!! You should have received a copy of the GNU General Public License
15!! along with this program; if not, write to the Free Software
16!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17!! 02110-1301, USA.
18!!
19
20#include "global.h"
21
24module mesh_init_oct_m
26 use box_oct_m
30 use debug_oct_m
31 use global_oct_m
32 use iihash_oct_m
33 use index_oct_m
34 use math_oct_m
36 use mesh_oct_m
40 use mpi_oct_m
45 use parser_oct_m
49 use sort_oct_m
50 use space_oct_m
52 use utils_oct_m
53
54 implicit none
55
56 private
57 public :: mesh_init_stage_1, mesh_init_stage_2, mesh_init_stage_3
61
62
63 integer, parameter :: INNER_POINT = 1
64 integer, parameter :: ENLARGEMENT_POINT = 2
65 integer, parameter :: BOUNDARY = -1
66
67contains
68
69! ---------------------------------------------------------
!> First stage mesh initialization.
76 subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
77 class(mesh_t), intent(inout) :: mesh
78 type(namespace_t), intent(in) :: namespace
79 class(space_t), intent(in) :: space
80 class(box_t), target, intent(in) :: box
81 class(coordinate_system_t), target, intent(in) :: coord_system
82 real(real64), intent(in) :: spacing(1:space%dim)
83 integer, intent(in) :: enlarge(1:space%dim)
84
85 integer :: idir, jj, delta
86 real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
87 logical :: out
88
89 push_sub_with_profile(mesh_init_stage_1)
90
91 mesh%box => box
92
93 safe_allocate(mesh%spacing(1:space%dim))
94 mesh%spacing = spacing ! this number can change in the following
95
96 mesh%use_curvilinear = coord_system%local_basis
97 mesh%coord_system => coord_system
98
99 call index_init(mesh%idx, space%dim)
100 mesh%idx%enlarge = enlarge
101
102 ! get box bounds along the axes that generate the grid points
103 select type (coord_system)
104 class is (affine_coordinates_t)
105 box_bounds = box%bounds(coord_system%basis)
106 class default
107 box_bounds = box%bounds()
108 end select
109
110 ! adjust nr
111 do idir = 1, space%dim
112 chi = m_zero
113 ! the upper border
114 jj = 0
115 out = .false.
116 do while(.not.out)
117 jj = jj + 1
118 chi(idir) = real(jj, real64) * mesh%spacing(idir)
119 if (mesh%use_curvilinear) then
120 x = coord_system%to_cartesian(chi)
121 out = x(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
122 else
123 ! do the same comparison here as in simul_box_contains_points
124 out = chi(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
125 end if
126 end do
127 mesh%idx%nr(2, idir) = jj - 1
128 end do
129
130 ! we have a symmetric mesh (for now)
131 mesh%idx%nr(1,:) = -mesh%idx%nr(2,:)
132
133 ! we have to adjust a couple of things for the periodic directions
134 do idir = 1, space%periodic_dim
135 if (mesh%idx%nr(2, idir) == 0) then
136 ! this happens if Spacing > box size
137 mesh%idx%nr(2, idir) = 1
138 mesh%idx%nr(1, idir) = -1
139 end if
140
141 ! We have to adjust the spacing to be commensurate with the box,
142 ! for this we scan the possible values of the grid size around the
143 ! one we selected. We choose the size that has the spacing closest
144 ! to the requested one.
145 do delta = -1, 1
146 spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
147 spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
148 end do
149
150 delta = minloc(spacing_new, dim = 1) - 2
151
152 assert(delta >= -1)
153 assert(delta <= 1)
154
155 mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
157 ! we need to adjust the grid by adding or removing one point
158 if (delta == -1) then
159 mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
160 else if (delta == 1) then
161 mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
162 end if
163
164 end do
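    ! Illustration of the delta scan above: the grid size is changed by at most one
    ! point so that the spacing 2*L/(2*nr + 1 - delta) is as close as possible to the
    ! requested one. A minimal sketch in plain Fortran (no Octopus types), kept in a
    ! comment and not part of this module; the box half-length (5.0) and requested
    ! spacing (0.23) are made-up example values:
    !
    !   program commensurate_spacing
    !     use, intrinsic :: iso_fortran_env, only: real64
    !     implicit none
    !     real(real64) :: length, spacing, trial(-1:1)
    !     integer :: nr, delta
    !     length  = 5.0_real64          ! half-length of the periodic box
    !     spacing = 0.23_real64         ! requested spacing
    !     nr = floor(length/spacing)    ! points on the positive half axis
    !     do delta = -1, 1
    !       trial(delta) = 2.0_real64*length/real(2*nr + 1 - delta, real64)
    !     end do
    !     delta = minloc(abs(trial - spacing), dim=1) - 2
    !     print *, 'adjusted spacing:', 2.0_real64*length/real(2*nr + 1 - delta, real64)
    !   end program commensurate_spacing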
165
166 if ( any(abs(mesh%spacing - spacing) > 1.e-6_real64) ) then
167 call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
168 call messages_warning()
169 end if
170
171 do idir = space%periodic_dim + 1, space%dim
172 if (mesh%idx%nr(2, idir) == 0) then
173 write(message(1),'(a,i2)') 'Spacing > box size in direction ', idir
174 call messages_fatal(1, namespace=namespace)
175 end if
176 end do
177
178 mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
179 ! compute strides for cubic indices
180 mesh%idx%stride(:) = 1
181 do idir = 2, space%dim+1
182 mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
183 (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
184 end do
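    ! Worked example of the stride computation above: for a hypothetical 3D grid with
    ! ll = (11, 11, 11) and enlarge = (2, 2, 2), each padded dimension holds
    ! 11 + 2*2 = 15 points, so the strides become (1, 15, 225, 3375). A commented
    ! sketch reproducing just this arithmetic (plain Fortran, not part of the module):
    !
    !   program cubic_strides
    !     implicit none
    !     integer, parameter :: dim = 3
    !     integer :: ll(dim), enlarge(dim), stride(dim+1), idir
    !     ll = 11
    !     enlarge = 2
    !     stride(1) = 1
    !     do idir = 2, dim + 1
    !       stride(idir) = stride(idir-1)*(ll(idir-1) + 2*enlarge(idir-1))
    !     end do
    !     print *, stride   ! prints 1 15 225 3375
    !   end program cubic_strides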
185
186 pop_sub_with_profile(mesh_init_stage_1)
187 end subroutine mesh_init_stage_1
188
189 ! ---------------------------------------------------------
!> This subroutine creates the global array of spatial indices and the inverse mapping.
195 !
196 subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
197 class(mesh_t), intent(inout) :: mesh
198 type(namespace_t), intent(in) :: namespace
199 class(space_t), intent(in) :: space
200 class(box_t), intent(in) :: box
201 type(stencil_t), intent(in) :: stencil
202 logical, optional, intent(in) :: regenerate
203
204 integer :: is
205 real(real64) :: chi(1:space%dim)
206 real(real64) :: pos(space%dim)
207 integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
208 integer(int64) :: global_size
209 integer(int32) :: local_size
210 integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
211 integer :: ip, ib, ib2, np, np_boundary, ii
212 logical :: found
213 type(lihash_t) :: spatial_to_boundary
214 integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
215 integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
216 integer(int64), allocatable :: spatial_to_grid(:)
217 integer, allocatable :: sizes(:)
218 integer(int64), allocatable :: offsets(:)
219 integer :: size_boundary
220#ifdef HAVE_MPI
221 integer(int64), pointer :: ptr(:)
222 type(mpi_grp_t) :: internode_grp, intranode_grp
223#endif
224
225 push_sub_with_profile(mesh_init_stage_2)
226
227 if (.not. optional_default(regenerate, .false.)) then
228 ! enlarge mesh for boundary points
229 mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
230 mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
231 end if
232
233 !%Variable MeshIndexType
234 !%Type integer
235 !%Default idx_cubic
236 !%Section Mesh
237 !%Description
238 !% Determine index type. Must be the same for restarting a calculation.
239 !%Option idx_cubic 1
240 !% Cubic indices are used to map the spatial information to the grid points.
241 !%Option idx_hilbert 2
242 !% A Hilbert space-filling curve is used to map the spatial information to
243 !% the grid points.
244 !%End
245 call parse_variable(namespace, 'MeshIndexType', idx_cubic, mesh%idx%type)
246
247 grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
248 mesh%idx%offset = grid_sizes/2
249 if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
250 write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
251 " points in one direction for ", space%dim, " dimensions. This is not supported."
252 call messages_fatal(1, namespace=namespace)
253 end if
254 global_size = product(int(grid_sizes, int64))
255 ! compute the bits per dimension: grid_sizes(i) <= 2**bits
256 mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64) )/log(2.)))
257
258 if (mesh%idx%type == idx_cubic) then
259 spatial_size = global_size
260 else if (mesh%idx%type == idx_hilbert) then
261 spatial_size = 2**(space%dim*mesh%idx%bits)
262 end if
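    ! Illustration of the two index types: for a hypothetical grid with
    ! grid_sizes = (27, 27, 27), bits = ceiling(log2(27)) = 5, so the cubic index
    ! space holds 27**3 = 19683 values while the Hilbert index space is padded to
    ! powers of two, 2**(3*5) = 32768. A commented sketch of this arithmetic
    ! (plain Fortran, made-up sizes, not part of the module):
    !
    !   program index_sizes
    !     use, intrinsic :: iso_fortran_env, only: int64, real64
    !     implicit none
    !     integer :: grid_sizes(3), bits
    !     integer(int64) :: size_cubic, size_hilbert
    !     grid_sizes = [27, 27, 27]
    !     bits = maxval(ceiling(log(real(grid_sizes, real64))/log(2.0_real64)))
    !     size_cubic   = product(int(grid_sizes, int64))   ! 19683
    !     size_hilbert = 2_int64**(size(grid_sizes)*bits)  ! 32768
    !     print *, bits, size_cubic, size_hilbert          ! 5  19683  32768
    !   end program index_sizes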
263
264 ! use block data decomposition of spatial indices
265 istart = spatial_size * mpi_world%rank/mpi_world%size
266 iend = spatial_size * (mpi_world%rank+1)/mpi_world%size - 1
267 if (.not. (iend - istart + 1 < huge(0_int32))) then
268 write(message(1), '(A, I10, A, I2, A)') "Error: local grid too large, more than ", &
269 huge(0_int32), " points. This is not supported. Maybe use more MPI ranks?"
270 call messages_fatal(1, namespace=namespace)
271 end if
272 local_size = int(iend - istart + 1, int32)
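    ! Illustration of the block data decomposition used above: each rank gets the
    ! index range [N*rank/P, N*(rank+1)/P - 1] (integer division). A commented
    ! sketch in plain Fortran with made-up values (N = 10 indices, P = 3 ranks),
    ! which yields the ranges [0,2], [3,5], [6,9]:
    !
    !   program block_decomposition
    !     use, intrinsic :: iso_fortran_env, only: int64
    !     implicit none
    !     integer(int64) :: n, istart, iend
    !     integer :: rank, nranks
    !     n = 10
    !     nranks = 3
    !     do rank = 0, nranks - 1
    !       istart = n*rank/nranks
    !       iend   = n*(rank + 1)/nranks - 1
    !       print *, rank, istart, iend    ! 0:[0,2]  1:[3,5]  2:[6,9]
    !     end do
    !   end program block_decomposition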
273
274 safe_allocate(grid_to_spatial_initial(1:local_size))
275
276 ! get inner grid indices
277 ip = 1
278 do ispatial = istart, iend
279 call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
280 ! first check if point is outside bounding box
281 if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
282 if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle
283 ! then check if point is inside simulation box
284 chi = real(point, real64) * mesh%spacing
285 pos = mesh%coord_system%to_cartesian(chi)
286 if (.not. box%contains_point(pos)) cycle
287 grid_to_spatial_initial(ip) = ispatial
288 assert(ip + 1 < huge(ip))
289 ip = ip + 1
290 end do
291 np = ip - 1
292
293 call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
294 np = sizes(mpi_world%rank)
295
296 safe_deallocate_a(grid_to_spatial_initial)
297
298 safe_allocate(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
299 safe_deallocate_a(sizes)
300
301 !$omp parallel do
302 do ispatial = grid_to_spatial(1), grid_to_spatial(np)
303 spatial_to_grid(ispatial) = -1
304 end do
305 !$omp parallel do
306 do ip = 1, np
307 spatial_to_grid(grid_to_spatial(ip)) = ip
308 end do
309
310 ! get local boundary indices
311 call lihash_init(spatial_to_boundary)
312 size_boundary = np
313 safe_allocate(boundary_to_spatial(1:size_boundary))
314 ib = 1
315 do ip = 1, np
316 call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
317 do is = 1, stencil%size
318 if (stencil%center == is) cycle
319 point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)
320 ! check if point is in inner part
321 call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
322 assert(ispatialb >= 0)
323 if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
324 ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
325 if (spatial_to_grid(ispatialb) > 0) cycle
326 end if
327 ! then check if point is inside simulation box
328 chi = real(point_stencil, real64) * mesh%spacing
329 pos = mesh%coord_system%to_cartesian(chi)
330 if (box%contains_point(pos)) cycle
331 ! it has to be a boundary point now
332 ! check if already counted
333 ib2 = lihash_lookup(spatial_to_boundary, ispatialb, found)
334 if (found) cycle
335 boundary_to_spatial(ib) = ispatialb
336 call lihash_insert(spatial_to_boundary, ispatialb, ib)
337 ib = ib + 1
338 ! enlarge array
339 if (ib >= size_boundary) then
340 size_boundary = size_boundary * 2
341 call make_array_larger(boundary_to_spatial, size_boundary)
342 end if
343 end do
344 end do
345 np_boundary = ib - 1
346 call lihash_end(spatial_to_boundary)
347 safe_deallocate_a(spatial_to_grid)
348
349 ! reorder inner points
350 call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
351 safe_deallocate_a(grid_to_spatial)
352
353 call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
354 np = sizes(mpi_world%rank)
355 mesh%np_global = sizes(0)
356 do ii = 1, mpi_world%size - 1
357 mesh%np_global = mesh%np_global + sizes(ii)
358 end do
359 safe_deallocate_a(sizes)
360 safe_deallocate_a(grid_to_spatial_reordered)
361
362 ! reorder boundary points
363 call make_array_larger(boundary_to_spatial, np_boundary)
364 call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
365 safe_deallocate_a(boundary_to_spatial)
366
367 call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
368 safe_deallocate_a(boundary_to_spatial_reordered)
369
370 ! global grid size
371 np_boundary = sizes(mpi_world%rank)
372 mesh%np_part_global = mesh%np_global + sizes(0)
373 do ii = 1, mpi_world%size - 1
374 mesh%np_part_global = mesh%np_part_global + sizes(ii)
375 end do
376 safe_deallocate_a(sizes)
377
378
379 ! get global indices
380#ifdef HAVE_MPI
381 ! create shared memory window and fill it only on root
382 call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
383 call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
384 mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
385#else
386 safe_allocate(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
387#endif
388 ! inner grid
389 call get_sizes_offsets(np, sizes, offsets)
390 call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
391 mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)
392
393 ! boundary indices
394 call get_sizes_offsets(np_boundary, sizes, offsets)
395 call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
396 mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)
397
398 ! fill global hash map
399#ifdef HAVE_MPI
400 ! create shared memory window and fill it only on root
401 call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
402 mesh%idx%window_spatial_to_grid, ptr)
403 mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
404#else
405 safe_allocate(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
406#endif
407 if (mpi_grp_is_root(mpi_world)) then
408 ! fill only on root, then broadcast
409 !$omp parallel do
410 do ispatial = 0, spatial_size-1
411 mesh%idx%spatial_to_grid_global(ispatial) = 0
412 end do
413 !$omp parallel do
414 do ipg = 1, mesh%np_part_global
415 mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
416 end do
417 end if
418
419#ifdef HAVE_MPI
420 ! now broadcast the global arrays to local rank 0 on each node
421 if (intranode_grp%rank == 0) then
422 call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
423 call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
424 end if
425 call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
426 call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
427#endif
428
429 safe_deallocate_a(offsets)
430 safe_deallocate_a(sizes)
431
432 safe_deallocate_a(boundary_to_spatial)
433 safe_deallocate_a(grid_to_spatial)
434
435 pop_sub_with_profile(mesh_init_stage_2)
436 end subroutine mesh_init_stage_2
437
438! ---------------------------------------------------------
!> When running parallel in domains, the stencil is needed to compute the ghost points.
443! ---------------------------------------------------------
444 subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
445 class(mesh_t), intent(inout) :: mesh
446 type(namespace_t), intent(in) :: namespace
447 class(space_t), intent(in) :: space
448 type(stencil_t), intent(in) :: stencil
449 type(multicomm_t), intent(in) :: mc
450 type(mesh_t), optional, intent(in) :: parent
451 logical, optional, intent(in) :: regenerate
452
453 integer :: ip
454
455 push_sub_with_profile(mesh_init_stage_3)
456
457 call mpi_grp_init(mesh%mpi_grp, mc%group_comm(p_strategy_domains))
458
459 ! check if we are running in parallel in domains
460 mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)
461
462 call checksum_calculate(1, mesh%np_part_global, mesh%idx%grid_to_spatial_global(1), &
463 mesh%idx%checksum)
464
465 if (mesh%parallel_in_domains) then
466 call do_partition()
467 else
468 ! When running serially, the local and global point numbers are the same.
469 assert(mesh%np_part_global < huge(mesh%np_part))
470 mesh%np = i8_to_i4(mesh%np_global)
471 mesh%np_part = i8_to_i4(mesh%np_part_global)
472
473 ! These must be initialized for par_vec_gather, par_vec_scatter to work
474 ! as copy operations when running without domain parallelization.
475 mesh%pv%np_global = mesh%np_global
476 mesh%pv%np_ghost = 0
477 mesh%pv%np_bndry = mesh%np_part - mesh%np
478 mesh%pv%npart = 1
479 mesh%pv%xlocal = 1
480 end if
481
482 ! Compute mesh%x
483 safe_allocate(mesh%x(1:mesh%np_part, 1:space%dim))
484 !$omp parallel do
485 do ip = 1, mesh%np_part
486 mesh%x(ip, 1:space%dim) = m_zero
487 end do
488
489 do ip = 1, mesh%np_part
490 mesh%x(ip, 1:space%dim) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
491 end do
492
493 call mesh_get_vol_pp()
494
495 pop_sub_with_profile(mesh_init_stage_3)
496
497 contains
498 ! ---------------------------------------------------------
499 subroutine do_partition()
500#ifdef HAVE_MPI
501 integer :: jj, ipart, jpart
502 integer(int64) :: ipg, jpg
503 integer, allocatable :: gindex(:), gedges(:)
504 logical, allocatable :: nb(:, :)
505 integer :: idx(space%dim), jx(space%dim)
506 type(mpi_comm) :: graph_comm
507 integer :: iedge, nnb
508 logical :: use_topo, reorder, partition_print
509 integer :: ierr
510
511 logical :: has_virtual_partition = .false.
512 integer :: vsize
513 type(restart_t) :: restart_load, restart_dump
514 integer, allocatable :: part_vec(:)
515
517
518 !Try to load the partition from the restart files
519 if (.not. optional_default(regenerate, .false.)) then
520 call restart_init(restart_load, namespace, restart_partition, restart_type_load, mc, ierr, mesh=mesh, exact=.true.)
521 if (ierr == 0) call mesh_partition_load(restart_load, mesh, ierr)
522 call restart_end(restart_load)
523 else
524 ierr = 0
525 end if
526
527 if (ierr /= 0) then
528
529 !%Variable MeshPartitionVirtualSize
530 !%Type integer
531 !%Default mesh mpi_grp size
532 !%Section Execution::Parallelization
533 !%Description
534 !% Gives the possibility to compute the mesh partition for a different number of nodes.
535 !% After the partition has been computed and written, the execution stops.
536 !%End
537 call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)
538
539 if (vsize /= mesh%mpi_grp%size) then
540 write(message(1),'(a,I7)') "Changing the partition size to", vsize
541 write(message(2),'(a)') "The execution will crash."
542 call messages_warning(2, namespace=namespace)
543 has_virtual_partition = .true.
544 else
545 has_virtual_partition = .false.
546 end if
547
548 if (.not. present(parent)) then
549 call mesh_partition(mesh, namespace, space, stencil, vsize)
550 else
551 ! if there is a parent grid, use its partition
552 call mesh_partition_from_parent(mesh, parent)
553 end if
554
555 !Now that we have the partitions, we save them
556 call restart_init(restart_dump, namespace, restart_partition, restart_type_dump, mc, ierr, mesh=mesh)
557 call mesh_partition_dump(restart_dump, mesh, vsize, ierr)
558 call restart_end(restart_dump)
559 end if
560
561 if (has_virtual_partition) then
562 call profiling_end(namespace)
563 call print_date("Calculation ended on ")
564 write(message(1),'(a)') "Execution has ended."
565 write(message(2),'(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
566 call messages_warning(2, namespace=namespace)
567 call messages_end()
568 call global_end()
569 stop
570 end if
571
572 !%Variable MeshUseTopology
573 !%Type logical
574 !%Default false
575 !%Section Execution::Parallelization
576 !%Description
577 !% (experimental) If enabled, <tt>Octopus</tt> will use an MPI virtual
578 !% topology to map the processors. This can improve performance
579 !% for certain interconnection systems.
580 !%End
581 call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)
582
583 if (use_topo) then
584 ! At the moment we still need the global partition. This will be removed in the near future.
585 safe_allocate(part_vec(1:mesh%np_global))
586 call partition_get_global(mesh%partition, part_vec(1:mesh%np_global))
587
588
589 ! generate a table of neighbours
590
591 safe_allocate(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
592 nb = .false.
593
594 do ipg = 1, mesh%np_global
595 ipart = part_vec(ipg)
596 call mesh_global_index_to_coords(mesh, ipg, idx)
597 do jj = 1, stencil%size
598 jx = idx + stencil%points(:, jj)
599 jpg = mesh_global_index_from_coords(mesh, jx)
600 if (jpg > 0 .and. jpg <= mesh%np_global) then
601 jpart = part_vec(jpg)
602 if (ipart /= jpart ) nb(ipart, jpart) = .true.
603 end if
604 end do
605 end do
606 safe_deallocate_a(part_vec)
607
608 ! now generate the information of the graph
609
610 safe_allocate(gindex(1:mesh%mpi_grp%size))
611 safe_allocate(gedges(1:count(nb)))
612
613 ! and now generate it
614 iedge = 0
615 do ipart = 1, mesh%mpi_grp%size
616 do jpart = 1, mesh%mpi_grp%size
617 if (nb(ipart, jpart)) then
618 iedge = iedge + 1
619 gedges(iedge) = jpart - 1
620 end if
621 end do
622 gindex(ipart) = iedge
623 end do
624
625 assert(iedge == count(nb))
626
627 reorder = .true.
628 call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm, mpi_err)
629
630 ! we have a new communicator
631 call mpi_grp_init(mesh%mpi_grp, graph_comm)
632
633 safe_deallocate_a(nb)
634 safe_deallocate_a(gindex)
635 safe_deallocate_a(gedges)
636
637 end if
638
639 if (optional_default(regenerate, .false.)) call par_vec_end(mesh%pv)
640 call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil,&
641 space, mesh%partition, mesh%pv, namespace)
642
643 ! check the number of ghost neighbours in parallel
644 nnb = 0
645 jpart = mesh%pv%partno
646 do ipart = 1, mesh%pv%npart
647 if (ipart == jpart) cycle
648 if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
649 end do
650 assert(nnb >= 0 .and. nnb < mesh%pv%npart)
651
652 ! Set local point numbers.
653 mesh%np = mesh%pv%np_local
654 mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry
655
656 !%Variable PartitionPrint
657 !%Type logical
658 !%Default true
659 !%Section Execution::Parallelization
660 !%Description
661 !% (experimental) If disabled, <tt>Octopus</tt> will neither compute
662 !% nor print the partition information, such as the number of local points,
663 !% neighbours, ghost points, and boundary points.
664 !%End
665 call parse_variable(namespace, 'PartitionPrint', .true., partition_print)
666
667 if (partition_print) then
668 call mesh_partition_write_info(mesh, namespace=namespace)
669 call mesh_partition_messages_debug(mesh, namespace)
670 end if
671#endif
672
674 end subroutine do_partition
675
676
677 ! ---------------------------------------------------------
!> calculate the volume of integration
679 subroutine mesh_get_vol_pp()
680
681 integer :: jj(space%dim), ip, np
682 real(real64) :: chi(space%dim)
683
685
686 np = 1
687 if (mesh%use_curvilinear) np = mesh%np_part
688 ! If there are no local points, we should not try to access the arrays
689 if (mesh%np_part == 0) np = 0
690
691 safe_allocate(mesh%vol_pp(1:np))
692
693 do ip = 1, np
694 mesh%vol_pp(ip) = product(mesh%spacing)
695 end do
696
697 do ip = 1, np
698 call mesh_local_index_to_coords(mesh, ip, jj)
699 chi = jj*mesh%spacing
700 mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%det_Jac(mesh%x(ip, :), chi)
701 end do
702
703 if (mesh%use_curvilinear .or. mesh%np_part == 0) then
704 mesh%volume_element = m_one
705 else
706 mesh%volume_element = mesh%vol_pp(1)
707 end if
708
710 end subroutine mesh_get_vol_pp
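  ! Illustration of the volume element computed above: each point carries
  ! product(spacing) times the Jacobian determinant of the coordinate
  ! transformation, which is one on a plain Cartesian grid. A commented sketch
  ! with made-up spacings (plain Fortran, not part of the module):
  !
  !   program volume_element
  !     use, intrinsic :: iso_fortran_env, only: real64
  !     implicit none
  !     real(real64) :: spacing(3), det_jac, vol_pp
  !     spacing = [0.2_real64, 0.2_real64, 0.25_real64]
  !     det_jac = 1.0_real64               ! Cartesian coordinates: unit Jacobian
  !     vol_pp  = product(spacing)*det_jac
  !     print *, vol_pp                    ! 0.01
  !   end program volume_element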
711
712 end subroutine mesh_init_stage_3
713
!> re-distribute the points to improve load balancing
718 subroutine rebalance_array(data_input, data_output, output_sizes)
719 integer(int64), contiguous, intent(in) :: data_input(:)
720 integer(int64), allocatable, intent(out) :: data_output(:)
721 integer, allocatable, optional, intent(out) :: output_sizes(:)
722
723 integer, allocatable :: initial_sizes(:), final_sizes(:)
724 integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
725 integer, allocatable :: scounts(:), sdispls(:)
726 integer, allocatable :: rcounts(:), rdispls(:)
727 integer :: irank
728 integer(int64) :: itmp
729
730 push_sub(rebalance_array)
731
732 ! collect current sizes of distributed array
733 safe_allocate(initial_sizes(0:mpi_world%size-1))
734 call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
735 safe_allocate(initial_offsets(0:mpi_world%size))
736 initial_offsets(0) = 0
737 do irank = 1, mpi_world%size
738 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
739 end do
740
741 ! now redistribute the arrays
742 ! use block data decomposition of grid indices
743 safe_allocate(final_offsets(0:mpi_world%size))
744 safe_allocate(final_sizes(0:mpi_world%size-1))
745
746 do irank = 0, mpi_world%size
747 final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
748 end do
749 do irank = 0, mpi_world%size - 1
750 assert(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
751 final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)
752 end do
753
754 safe_allocate(scounts(0:mpi_world%size-1))
755 safe_allocate(sdispls(0:mpi_world%size-1))
756 safe_allocate(rcounts(0:mpi_world%size-1))
757 safe_allocate(rdispls(0:mpi_world%size-1))
758 ! determine communication pattern
759 scounts = 0
760 do irank = 0, mpi_world%size - 1
761 ! get overlap of initial and final distribution
762 itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
763 max(final_offsets(irank), initial_offsets(mpi_world%rank))
764 assert(itmp < huge(0_int32))
765 if (itmp < 0) then
766 scounts(irank) = 0
767 else
768 scounts(irank) = int(itmp, int32)
769 end if
770 end do
771 sdispls(0) = 0
772 do irank = 1, mpi_world%size - 1
773 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
774 end do
775 assert(sum(int(scounts, int64)) < huge(0_int32))
776 assert(sum(scounts) == initial_sizes(mpi_world%rank))
777
778 rcounts = 0
779 do irank = 0, mpi_world%size - 1
780 ! get overlap of initial and final distribution
781 itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
782 max(final_offsets(mpi_world%rank), initial_offsets(irank))
783 assert(itmp < huge(0_int32))
784 if (itmp < 0) then
785 rcounts(irank) = 0
786 else
787 rcounts(irank) = int(itmp, int32)
788 end if
789 end do
790 rdispls(0) = 0
791 do irank = 1, mpi_world%size - 1
792 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
793 end do
794 ! check for consistency between sending and receiving
795 assert(sum(rcounts) == final_sizes(mpi_world%rank))
796
797 safe_allocate(data_output(1:final_sizes(mpi_world%rank)))
798 call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
799 data_output, rcounts, rdispls, mpi_integer8)
800
801 ! save final sizes of array if optional argument present
802 if (present(output_sizes)) then
803 safe_allocate(output_sizes(0:mpi_world%size-1))
804 output_sizes(:) = final_sizes(:)
805 end if
806
807 safe_deallocate_a(final_offsets)
808 safe_deallocate_a(final_sizes)
809
810 safe_deallocate_a(scounts)
811 safe_deallocate_a(sdispls)
812 safe_deallocate_a(rcounts)
813 safe_deallocate_a(rdispls)
814
815
816 pop_sub(rebalance_array)
817 end subroutine rebalance_array
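  ! Illustration of the overlap-based send counts used above: with three ranks
  ! initially holding 5, 1 and 6 elements (offsets 0, 5, 6, 12), the balanced
  ! layout has offsets 0, 4, 8, 12, so rank 0 (owning global positions 0..4)
  ! sends 4 elements to rank 0 and 1 element to rank 1. A commented sketch
  ! computing the send counts of one rank (plain Fortran, made-up sizes,
  ! not part of the module):
  !
  !   program rebalance_counts
  !     use, intrinsic :: iso_fortran_env, only: int64
  !     implicit none
  !     integer, parameter :: nranks = 3, myrank = 0
  !     integer(int64) :: ini(0:nranks), fin(0:nranks), overlap
  !     integer :: irank, scounts(0:nranks-1)
  !     ini = [0_int64, 5_int64, 6_int64, 12_int64]   ! offsets of the current layout
  !     do irank = 0, nranks
  !       fin(irank) = ini(nranks)*irank/nranks       ! offsets of the balanced layout
  !     end do
  !     do irank = 0, nranks - 1
  !       overlap = min(fin(irank+1), ini(myrank+1)) - max(fin(irank), ini(myrank))
  !       scounts(irank) = int(max(overlap, 0_int64))
  !     end do
  !     print *, scounts   ! 4 1 0 : what rank 0 sends to each rank
  !   end program rebalance_counts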
818
!> reorder the points in the grid according to the variables MeshOrder and MeshLocalOrder
823 subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
824 type(namespace_t), intent(in) :: namespace
825 class(space_t), intent(in) :: space
826 type(index_t), intent(in) :: idx
827 integer(int64), intent(in) :: grid_to_spatial(:)
828 integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)
829
830 integer :: bsize(space%dim), order, default
831 integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
832 type(block_t) :: blk
833 integer, parameter :: &
834 ORDER_BLOCKS = 1, &
835 order_original = 2, &
836 order_cube = 3
837 integer :: point(1:space%dim)
838 integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
839 integer, allocatable :: index_map(:), indices(:)
840 integer(int64), allocatable :: grid_to_spatial_recv(:)
841 integer, allocatable :: initial_sizes(:)
842 integer(int64), allocatable :: initial_offsets(:)
843 integer(int64) :: istart, iend, indstart, indend, spatial_size
844 integer :: irank, local_size, num_recv
845 integer :: iunique, nunique
846 integer :: direction
847 logical :: increase_with_dimension
848
849 integer, allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
850 integer(int64), allocatable :: spatial_cutoff(:)
851
852 push_sub(reorder_points)
853
854 !%Variable MeshOrder
855 !%Type integer
856 !%Section Execution::Optimization
857 !%Description
858 !% This variable controls how the grid points are mapped to a
859 !% linear array for global arrays. For runs that are parallel
860 !% in domains, the local mesh order may be different (see
861 !% <tt>MeshLocalOrder</tt>).
862 !% The default is blocks when serial in domains and cube when
863 !% parallel in domains with the local mesh order set to blocks.
864 !%Option order_blocks 1
865 !% The grid is mapped using small parallelepipedic grids. The size
866 !% of the blocks is controlled by <tt>MeshBlockSize</tt>.
867 !%Option order_original 2
868 !% The original order of the indices is used to map the grid.
869 !%Option order_cube 3
870 !% The grid is mapped using a full cube, i.e. without blocking.
871 !%End
872 default = order_blocks
873 call parse_variable(namespace, 'MeshOrder', default, order)
874 ! no reordering in 1D necessary
875 if (space%dim == 1) then
876 order = order_original
877 end if
878
879 !%Variable MeshBlockDirection
880 !%Type integer
881 !%Section Execution::Optimization
882 !%Description
883 !% Determines the direction in which the dimensions are chosen to compute
884 !% the blocked index for sorting the mesh points (see MeshBlockSize).
886 !% The default is increase_with_dimension, corresponding to xyz ordering
886 !% in 3D.
887 !%Option increase_with_dimension 1
888 !% The fastest changing index is in the first dimension, i.e., in 3D this
889 !% corresponds to ordering in xyz directions.
890 !%Option decrease_with_dimension 2
891 !% The fastest changing index is in the last dimension, i.e., in 3D this
892 !% corresponds to ordering in zyx directions.
893 !%End
894 call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
895 increase_with_dimension = direction == 1
896 if (direction /= 1 .and. direction /= 2) then
897 call messages_input_error(namespace, 'MeshBlockDirection')
898 end if
899
900 select case (order)
901 case (order_original)
902 ! only copy points, they stay in their original ordering
903 safe_allocate(grid_to_spatial_reordered(1:size(grid_to_spatial)))
904 grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))
905 case (order_blocks, order_cube)
906 if (order == order_cube) then
907 bsize = idx%nr(2, :) - idx%nr(1, :) + 1
908 else
909 !%Variable MeshBlockSize
910 !%Type block
911 !%Section Execution::Optimization
912 !%Description
913 !% To improve memory-access locality when calculating derivatives,
914 !% <tt>Octopus</tt> arranges mesh points in blocks. This variable
915 !% controls the size of these blocks in the different
916 !% directions. The default is selected according to the value of
917 !% the <tt>StatesBlockSize</tt> variable. (This variable only affects the
918 !% performance of <tt>Octopus</tt> and not the results.)
919 !%End
920 if (conf%target_states_block_size < 16) then
921 bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
922 if (space%dim > 1) bsize(2) = 4
923 if (space%dim > 2) bsize(3:) = 10
924 else
925 bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
926 if (space%dim > 1) bsize(2) = 15
927 if (space%dim > 2) bsize(3:) = 15
928 end if
929
930 if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
931 nn = parse_block_cols(blk, 0)
932 if (nn /= space%dim) then
933 message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."
934 call messages_fatal(1, namespace=namespace)
935 end if
936 do idir = 1, nn
937 call parse_block_integer(blk, 0, idir - 1, bsize(idir))
938 end do
939 end if
940 end if
941
942 number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1
943
944
945 ! do the global reordering in parallel, use block data decomposition of global indices
946 ! reorder indices along blocked parallelepiped curve
947
948 ! collect current sizes of distributed array
949 safe_allocate(initial_sizes(0:mpi_world%size-1))
950 call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
951 safe_allocate(initial_offsets(0:mpi_world%size))
952 initial_offsets(0) = 0
953 do irank = 1, mpi_world%size
954 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
955 end do
956
957 ! get local range and size
958 istart = initial_offsets(mpi_world%rank)
959 iend = initial_offsets(mpi_world%rank + 1) - 1
960 assert(iend - istart + 1 < huge(0_int32))
961 local_size = int(iend - istart + 1, int32)
962 assert(local_size == initial_sizes(mpi_world%rank))
963
964 ! compute new indices locally
965 safe_allocate(reorder_indices(1:local_size))
966 safe_allocate(indices(1:local_size))
967 safe_allocate(grid_to_spatial_reordered(1:local_size))
968 !$omp parallel do private(point)
969 do ip = 1, local_size
970 call index_spatial_to_point(idx, space%dim, grid_to_spatial(ip), point)
971 point = point + idx%offset
972 reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)
973 end do
974 ! parallel sort according to the new indices
975 ! sort the local array
976 call sort(reorder_indices, indices)
977 ! save reordered indices to send to other processes
978 !$omp parallel do
979 do ip = 1, local_size
980 grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))
981 end do
982
983 ! get minimum and maximum
984 if(local_size > 0) then
985 indstart = reorder_indices(1)
986 indend = reorder_indices(local_size)
987 else
988 indstart = huge(1_int64)
989 indend = 0
990 end if
991 call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
992 call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
993 spatial_size = indend - indstart + 1
994
995 ! get index ranges for each rank
996 safe_allocate(spatial_cutoff(0:mpi_world%size-1))
997 do irank = 0, mpi_world%size - 1
998 spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart
999 end do
1000
1001 safe_allocate(scounts(0:mpi_world%size-1))
1002 safe_allocate(sdispls(0:mpi_world%size-1))
1003 safe_allocate(rcounts(0:mpi_world%size-1))
1004 safe_allocate(rdispls(0:mpi_world%size-1))
1005 ! get send counts
1006 scounts = 0
1007 irank = 0
1008 ! the indices are ordered, so we can go through them and increase
1009 ! the rank to which they are assigned when we cross a cutoff
1010 do ip = 1, local_size
1011 if (reorder_indices(ip) >= spatial_cutoff(irank)) then
1012 ! this do loop is needed in case some ranks do not have any points
1013 do while (reorder_indices(ip) >= spatial_cutoff(irank))
1014 irank = irank + 1
1015 end do
1016 assert(irank < mpi_world%size)
1017 end if
1018 scounts(irank) = scounts(irank) + 1
1019 end do
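      ! Illustration of the cutoff bucketing above: each locally sorted index is
      ! assigned to the first rank whose cutoff lies above it. With made-up sorted
      ! indices (4, 9, 15, 23) and cutoffs (10, 20, 30) for three ranks, the send
      ! counts are (2, 1, 1). A commented sketch (plain Fortran, not part of the
      ! module; in the real code the cutoffs come from the global index range):
      !
      !   program cutoff_bucketing
      !     use, intrinsic :: iso_fortran_env, only: int64
      !     implicit none
      !     integer(int64) :: sorted(4), cutoff(0:2)
      !     integer :: scounts(0:2), ip, irank
      !     sorted = [4_int64, 9_int64, 15_int64, 23_int64]  ! locally sorted indices
      !     cutoff = [10_int64, 20_int64, 30_int64]          ! upper bound per rank
      !     scounts = 0
      !     irank = 0
      !     do ip = 1, size(sorted)
      !       do while (sorted(ip) >= cutoff(irank))
      !         irank = irank + 1
      !       end do
      !       scounts(irank) = scounts(irank) + 1
      !     end do
      !     print *, scounts   ! 2 1 1
      !   end program cutoff_bucketing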
1020 safe_deallocate_a(spatial_cutoff)
1021 assert(sum(scounts) == local_size)
1022
1023 ! compute communication pattern (sdispls, rcounts, rdispls)
1024 sdispls(0) = 0
1025 do irank = 1, mpi_world%size - 1
1026 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
1027 end do
1028
1029 call mpi_world%alltoall(scounts, 1, mpi_integer, &
1030 rcounts, 1, mpi_integer)
1031
1032 rdispls(0) = 0
1033 do irank = 1, mpi_world%size - 1
1034 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
1035 end do
1036
1037 ! make sure the arrays get allocated even if we do not receive anything
1038 num_recv = max(sum(rcounts), 1)
1039 ! communicate the locally sorted indices
1040 safe_allocate(reorder_recv(1:num_recv))
1041 call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
1042 reorder_recv, rcounts, rdispls, mpi_integer8)
1043 safe_deallocate_a(reorder_indices)
1044
1045 ! communicate the corresponding spatial indices
1046 safe_allocate(grid_to_spatial_recv(1:num_recv))
1047 call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
1048 grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
1049 safe_deallocate_a(grid_to_spatial_reordered)
1050
1051 ! do k-way merge of sorted indices
1052 safe_allocate(reorder_indices(1:num_recv))
1053 safe_allocate(index_map(1:num_recv))
1054 if (sum(rcounts) > 0) then
1055 call merge_sorted_arrays(reorder_recv, rcounts, reorder_indices, index_map)
1056
1057 ! get number of unique indices, needed for boundary
1058 nunique = 1
1059 do ipg = 2, sum(rcounts)
1060 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1061 nunique = nunique + 1
1062 end if
1063 end do
1064
1065 ! reorder according to new order, but remove duplicate entries
1066 safe_allocate(grid_to_spatial_reordered(1:nunique))
1067 iunique = 1
1068 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
1069 do ipg = 2, sum(rcounts)
1070 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1071 iunique = iunique + 1
1072 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
1073 end if
1074 end do
1075 else
1076 safe_allocate(grid_to_spatial_reordered(1:0))
1077 end if
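      ! Illustration of the duplicate removal above: after the k-way merge the
      ! received indices are sorted, so unique values can be counted by comparing
      ! neighbours. With made-up merged indices (2, 3, 5, 5, 9) there are 4 unique
      ! entries. A commented sketch (plain Fortran, not part of the module):
      !
      !   program unique_count
      !     use, intrinsic :: iso_fortran_env, only: int64
      !     implicit none
      !     integer(int64) :: merged(5)
      !     integer :: ipg, nunique
      !     merged = [2_int64, 3_int64, 5_int64, 5_int64, 9_int64]  ! merged and sorted
      !     nunique = 1
      !     do ipg = 2, size(merged)
      !       if (merged(ipg) /= merged(ipg-1)) nunique = nunique + 1
      !     end do
      !     print *, nunique   ! 4
      !   end program unique_count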
1078
1079 safe_deallocate_a(initial_offsets)
1080 safe_deallocate_a(initial_sizes)
1081
1082 safe_deallocate_a(reorder_indices)
1083 safe_deallocate_a(reorder_recv)
1084
1085 safe_deallocate_a(grid_to_spatial_recv)
1086 safe_deallocate_a(index_map)
1087 safe_deallocate_a(indices)
1088
1089 safe_deallocate_a(scounts)
1090 safe_deallocate_a(sdispls)
1091 safe_deallocate_a(rcounts)
1092 safe_deallocate_a(rdispls)
1093
1094 end select
1095 pop_sub(reorder_points)
1096 end subroutine reorder_points
1097
!> return the sizes and offsets of a distributed array for all tasks of an MPI group
1100 subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)
1101 integer, intent(in) :: local_size
1102 integer, allocatable, intent(out) :: sizes(:)
1103 integer(int64), allocatable, intent(out) :: offsets(:)
1104 type(mpi_grp_t), optional, intent(in) :: mpi_grp
1105
1106 integer :: irank
1107 type(mpi_grp_t) :: mpi_grp_
1108
1109 push_sub(get_sizes_offsets)
1110
1111 if (present(mpi_grp)) then
1112 mpi_grp_ = mpi_grp
1113 else
1114 mpi_grp_ = mpi_world
1115 end if
1116
1117 safe_allocate(sizes(0:mpi_grp_%size-1))
1118 call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)
1119 safe_allocate(offsets(0:mpi_grp_%size))
1120 offsets(0) = 0
1121 do irank = 1, mpi_grp_%size
1122 offsets(irank) = offsets(irank-1) + sizes(irank-1)
1123 end do
1124
1125 pop_sub(get_sizes_offsets)
1126 end subroutine get_sizes_offsets
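  ! Illustration of what this helper returns: the gathered local sizes are turned
  ! into exclusive prefix sums. With made-up local sizes (3, 5, 2), the offsets are
  ! (0, 3, 8, 10). A commented sketch (plain Fortran, no MPI, not part of the module):
  !
  !   program sizes_to_offsets
  !     use, intrinsic :: iso_fortran_env, only: int64
  !     implicit none
  !     integer :: sizes(0:2), irank
  !     integer(int64) :: offsets(0:3)
  !     sizes = [3, 5, 2]        ! local sizes as gathered from all ranks
  !     offsets(0) = 0
  !     do irank = 1, size(sizes)
  !       offsets(irank) = offsets(irank-1) + sizes(irank-1)
  !     end do
  !     print *, offsets         ! 0 3 8 10
  !   end program sizes_to_offsets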
1127
1128end module mesh_init_oct_m
1129
1130!! Local Variables:
1131!! mode: f90
1132!! coding: utf-8
1133!! End: