Octopus
mesh_init.F90
1!! Copyright (C) 2002-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
2!! Copyright (C) 2021 S. Ohlmann
3!!
4!! This program is free software; you can redistribute it and/or modify
5!! it under the terms of the GNU General Public License as published by
6!! the Free Software Foundation; either version 2, or (at your option)
7!! any later version.
8!!
9!! This program is distributed in the hope that it will be useful,
10!! but WITHOUT ANY WARRANTY; without even the implied warranty of
11!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12!! GNU General Public License for more details.
13!!
14!! You should have received a copy of the GNU General Public License
15!! along with this program; if not, write to the Free Software
16!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17!! 02110-1301, USA.
18!!
19
20#include "global.h"
21
!> This module contains subroutines related to the initialization of the mesh.
24module mesh_init_oct_m
26 use box_oct_m
30 use debug_oct_m
31 use global_oct_m
32 use iihash_oct_m
33 use index_oct_m
34 use math_oct_m
36 use mesh_oct_m
40 use mpi_oct_m
45 use parser_oct_m
49 use sort_oct_m
50 use space_oct_m
52 use utils_oct_m
53
54 implicit none
55
56 private
 57 public :: &
    mesh_init_stage_1, &
    mesh_init_stage_2, &
    mesh_init_stage_3
 61
62
63 integer, parameter :: INNER_POINT = 1
64 integer, parameter :: ENLARGEMENT_POINT = 2
65 integer, parameter :: BOUNDARY = -1
66
67contains
68
 69! ---------------------------------------------------------
!> First stage mesh initialization.
76 subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
77 class(mesh_t), intent(inout) :: mesh
78 type(namespace_t), intent(in) :: namespace
79 class(space_t), intent(in) :: space
80 class(box_t), target, intent(in) :: box
81 class(coordinate_system_t), target, intent(in) :: coord_system
82 real(real64), intent(in) :: spacing(1:space%dim)
83 integer, intent(in) :: enlarge(1:space%dim)
84
85 integer :: idir, jj, delta
86 real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
87 logical :: out
88
89 push_sub_with_profile(mesh_init_stage_1)
90
91 mesh%box => box
92
93 safe_allocate(mesh%spacing(1:space%dim))
94 mesh%spacing = spacing ! this number can change in the following
95
96 mesh%use_curvilinear = coord_system%local_basis
97 mesh%coord_system => coord_system
98
99 call index_init(mesh%idx, space%dim)
100 mesh%idx%enlarge = enlarge
101
102 ! get box bounds along the axes that generate the grid points
103 select type (coord_system)
104 class is (affine_coordinates_t)
105 box_bounds = box%bounds(coord_system%basis)
106 class default
107 box_bounds = box%bounds()
108 end select
109
110 ! adjust nr
111 do idir = 1, space%dim
112 chi = m_zero
113 ! the upper border
114 jj = 0
115 out = .false.
116 do while(.not.out)
117 jj = jj + 1
118 chi(idir) = real(jj, real64) * mesh%spacing(idir)
119 if (mesh%use_curvilinear) then
120 x = coord_system%to_cartesian(chi)
121 out = x(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
122 else
123 ! do the same comparison here as in simul_box_contains_points
124 out = chi(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
125 end if
126 end do
127 mesh%idx%nr(2, idir) = jj - 1
128 end do
129
130 ! we have a symmetric mesh (for now)
131 mesh%idx%nr(1,:) = -mesh%idx%nr(2,:)
132
133 ! we have to adjust a couple of things for the periodic directions
134 do idir = 1, space%periodic_dim
135 if (mesh%idx%nr(2, idir) == 0) then
136 ! this happens if Spacing > box size
137 mesh%idx%nr(2, idir) = 1
138 mesh%idx%nr(1, idir) = -1
139 end if
140
141 ! We have to adjust the spacing to be commensurate with the box,
142 ! for this we scan the possible values of the grid size around the
143 ! one we selected. We choose the size that has the spacing closest
144 ! to the requested one.
145 do delta = -1, 1
146 spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
147 spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
148 end do
149
150 delta = minloc(spacing_new, dim = 1) - 2
151
152 assert(delta >= -1)
153 assert(delta <= 1)
154
155 mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
157 ! we need to adjust the grid by adding or removing one point
158 if (delta == -1) then
159 mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
160 else if (delta == 1) then
161 mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
162 end if
163
164 end do
165
166 if ( any(abs(mesh%spacing - spacing) > 1.e-6_real64) ) then
167 call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
168 call messages_warning()
169 end if
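    ! Illustration of the commensuration step above (numbers chosen for this
    ! example only): for a periodic direction with maxval(abs(box_bounds)) = 5.0
    ! and a requested spacing of 0.9, the scan first gives nr(2, idir) = 5. The
    ! three candidate spacings are 10/12 = 0.833 (delta = -1), 10/11 = 0.909
    ! (delta = 0) and 10/10 = 1.0 (delta = +1); the closest to 0.9 is 0.909, so
    ! delta = 0 is selected and the direction keeps 2*5 + 1 = 11 grid points.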
170
171 do idir = space%periodic_dim + 1, space%dim
172 if (mesh%idx%nr(2, idir) == 0) then
173 write(message(1),'(a,i2)') 'Spacing > box size in direction ', idir
174 call messages_fatal(1, namespace=namespace)
175 end if
176 end do
177
178 mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
179 ! compute strides for cubic indices
180 mesh%idx%stride(:) = 1
181 do idir = 2, space%dim+1
182 mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
183 (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
184 end do
185
186 pop_sub_with_profile(mesh_init_stage_1)
187 end subroutine mesh_init_stage_1
188
 189 ! ---------------------------------------------------------
!> This subroutine creates the global array of spatial indices and the inverse mapping.
 195 !
196 subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
197 class(mesh_t), intent(inout) :: mesh
198 type(namespace_t), intent(in) :: namespace
199 class(space_t), intent(in) :: space
200 class(box_t), intent(in) :: box
201 type(stencil_t), intent(in) :: stencil
202 logical, optional, intent(in) :: regenerate
203
204 integer :: is
205 real(real64) :: chi(1:space%dim)
206 real(real64) :: pos(space%dim)
207 integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
208 integer(int64) :: global_size
209 integer(int32) :: local_size
210 integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
211 integer :: ip, ib, ib2, np, np_boundary, ii
212 logical :: found
213 type(lihash_t) :: spatial_to_boundary
214 integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
215 integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
216 integer(int64), allocatable :: spatial_to_grid(:)
217 integer, allocatable :: sizes(:)
218 integer(int64), allocatable :: offsets(:)
219 integer :: size_boundary
220#ifdef HAVE_MPI
221 integer(int64), pointer :: ptr(:)
222 type(mpi_grp_t) :: internode_grp, intranode_grp
223#endif
224
225 push_sub_with_profile(mesh_init_stage_2)
226
227 if (.not. optional_default(regenerate, .false.)) then
228 ! enlarge mesh for boundary points
229 mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
230 mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
231 end if
232
233 !%Variable MeshIndexType
234 !%Type integer
235 !%Default idx_cubic
236 !%Section Mesh
237 !%Description
238!% Determines the index type used to map the spatial information to the grid points. It must be the same when restarting a calculation.
239 !%Option idx_cubic 1
240 !% Cubic indices are used to map the spatial information to the grid points.
241 !%Option idx_hilbert 2
242 !% A Hilbert space-filling curve is used to map the spatial information to
243 !% the grid points.
244 !%End
245 call parse_variable(namespace, 'MeshIndexType', idx_cubic, mesh%idx%type)
246
247 grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
248 mesh%idx%offset = grid_sizes/2
249 if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
250 write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
251 " points in one direction for ", space%dim, " dimensions. This is not supported."
252 call messages_fatal(1, namespace=namespace)
253 end if
254 global_size = product(int(grid_sizes, int64))
255 ! compute the bits per dimension: grid_sizes(i) <= 2**bits
256 mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64) )/log(2.)))
257
258 if (mesh%idx%type == idx_cubic) then
259 spatial_size = global_size
260 else if (mesh%idx%type == idx_hilbert) then
261 spatial_size = 2**(space%dim*mesh%idx%bits)
262 end if
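    ! Example of the two index types (sizes chosen for illustration only): for
    ! grid_sizes = (81, 81, 81), bits = ceiling(log2(81)) = 7, so a cubic index
    ! enumerates global_size = 81**3 = 531441 spatial slots, whereas a Hilbert
    ! index enumerates 2**(3*7) = 2097152 slots (the smallest power-of-two cube
    ! containing the grid). In both cases only points inside the box are kept.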
263
264 ! use block data decomposition of spatial indices
265 istart = spatial_size * mpi_world%rank/mpi_world%size
266 iend = spatial_size * (mpi_world%rank+1)/mpi_world%size - 1
267 if (.not. (iend - istart + 1 < huge(0_int32))) then
268 write(message(1), '(A, I10, A, I2, A)') "Error: local grid too large, more than ", &
269 huge(0_int32), " points. This is not supported. Maybe use more MPI ranks?"
270 call messages_fatal(1, namespace=namespace)
271 end if
272 local_size = int(iend - istart + 1, int32)
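    ! Example of the block decomposition above (sizes for illustration only):
    ! with spatial_size = 10 and 3 ranks, rank 0 scans spatial indices 0..2,
    ! rank 1 scans 3..5 and rank 2 scans 6..9, so every spatial index is
    ! visited by exactly one rank.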
273
274 safe_allocate(grid_to_spatial_initial(1:local_size))
275
276 ! get inner grid indices
277 ip = 1
278 do ispatial = istart, iend
279 call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
280 ! first check if point is outside bounding box
281 if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
282 if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle
283 ! then check if point is inside simulation box
284 chi = real(point, real64) * mesh%spacing
285 pos = mesh%coord_system%to_cartesian(chi)
286 if (.not. box%contains_point(pos)) cycle
287 grid_to_spatial_initial(ip) = ispatial
288 assert(ip + 1 < huge(ip))
289 ip = ip + 1
290 end do
291 np = ip - 1
292
293 call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
294 np = sizes(mpi_world%rank)
295
296 safe_deallocate_a(grid_to_spatial_initial)
297
298 safe_allocate(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
299 safe_deallocate_a(sizes)
300
301 !$omp parallel do
302 do ispatial = grid_to_spatial(1), grid_to_spatial(np)
303 spatial_to_grid(ispatial) = -1
304 end do
305 !$omp parallel do
306 do ip = 1, np
307 spatial_to_grid(grid_to_spatial(ip)) = ip
308 end do
309
310 ! get local boundary indices
311 call lihash_init(spatial_to_boundary)
312 size_boundary = np
313 safe_allocate(boundary_to_spatial(1:size_boundary))
314 ib = 1
315 do ip = 1, np
316 call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
317 do is = 1, stencil%size
318 if (stencil%center == is) cycle
319 point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)
320 ! check if point is in inner part
321 call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
322 assert(ispatialb >= 0)
323 if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
324 ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
325 if (spatial_to_grid(ispatialb) > 0) cycle
326 end if
327 ! then check if point is inside simulation box
328 chi = real(point_stencil, real64) * mesh%spacing
329 pos = mesh%coord_system%to_cartesian(chi)
330 if (box%contains_point(pos)) cycle
331 ! it has to be a boundary point now
332 ! check if already counted
333 ib2 = lihash_lookup(spatial_to_boundary, ispatialb, found)
334 if (found) cycle
335 boundary_to_spatial(ib) = ispatialb
336 call lihash_insert(spatial_to_boundary, ispatialb, ib)
337 ib = ib + 1
338 ! enlarge array
339 if (ib >= size_boundary) then
340 size_boundary = size_boundary * 2
341 call make_array_larger(boundary_to_spatial, size_boundary)
342 end if
343 end do
344 end do
345 np_boundary = ib - 1
346 call lihash_end(spatial_to_boundary)
347 safe_deallocate_a(spatial_to_grid)
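    ! Summary of the loop above: for every local inner point all stencil
    ! neighbours are inspected; a neighbour that is neither a known inner point
    ! nor inside the simulation box must be a boundary point and is recorded,
    ! with the hash table filtering out points already found on this rank.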
348
349 ! reorder inner points
350 call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
351 safe_deallocate_a(grid_to_spatial)
352
353 call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
354 np = sizes(mpi_world%rank)
355 mesh%np_global = sizes(0)
356 do ii = 1, mpi_world%size - 1
357 mesh%np_global = mesh%np_global + sizes(ii)
358 end do
359 safe_deallocate_a(sizes)
360 safe_deallocate_a(grid_to_spatial_reordered)
361
362 ! reorder boundary points
363 call make_array_larger(boundary_to_spatial, np_boundary)
364 call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
365 safe_deallocate_a(boundary_to_spatial)
366
367 call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
368 safe_deallocate_a(boundary_to_spatial_reordered)
369
370 ! global grid size
371 np_boundary = sizes(mpi_world%rank)
372 mesh%np_part_global = mesh%np_global + sizes(0)
373 do ii = 1, mpi_world%size - 1
374 mesh%np_part_global = mesh%np_part_global + sizes(ii)
375 end do
376 safe_deallocate_a(sizes)
377
378
379 ! get global indices
380#ifdef HAVE_MPI
381 ! create shared memory window and fill it only on root
382 call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
383 call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
384 mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
385#else
386 safe_allocate(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
387#endif
388 ! inner grid
389 call get_sizes_offsets(np, sizes, offsets)
390 call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
391 mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)
392
393 ! boundary indices
394 call get_sizes_offsets(np_boundary, sizes, offsets)
395 call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
396 mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)
397
398 ! fill global hash map
399#ifdef HAVE_MPI
400 ! create shared memory window and fill it only on root
401 call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
402 mesh%idx%window_spatial_to_grid, ptr)
403 mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
404#else
405 safe_allocate(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
406#endif
407 if (mpi_grp_is_root(mpi_world)) then
408 ! fill only on root, then broadcast
409 !$omp parallel do
410 do ispatial = 0, spatial_size-1
411 mesh%idx%spatial_to_grid_global(ispatial) = 0
412 end do
413 !$omp parallel do
414 do ipg = 1, mesh%np_part_global
415 mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
416 end do
417 end if
418
419#ifdef HAVE_MPI
420 ! now broadcast the global arrays to local rank 0 on each node
421 if (intranode_grp%rank == 0) then
422 call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
423 call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
424 end if
425 call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
426 call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
427#endif
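    ! Note on the shared-memory windows used above: the global mapping arrays
    ! grid_to_spatial_global and spatial_to_grid_global are allocated once per
    ! node (in an MPI shared-memory window over the intranode group), filled on
    ! the global root, broadcast to the local root of every node and then
    ! synchronized, instead of being replicated on every MPI rank.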
428
429 safe_deallocate_a(offsets)
430 safe_deallocate_a(sizes)
431
432 safe_deallocate_a(boundary_to_spatial)
433 safe_deallocate_a(grid_to_spatial)
434
435 pop_sub_with_profile(mesh_init_stage_2)
436 end subroutine mesh_init_stage_2
437
 438! ---------------------------------------------------------
!> When running parallel in domains, stencil and np_stencil are needed to compute the ghost points.
 443! ---------------------------------------------------------
444 subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
445 class(mesh_t), intent(inout) :: mesh
446 type(namespace_t), intent(in) :: namespace
447 class(space_t), intent(in) :: space
448 type(stencil_t), intent(in) :: stencil
449 type(multicomm_t), intent(in) :: mc
450 type(mesh_t), optional, intent(in) :: parent
451 logical, optional, intent(in) :: regenerate
452
453 integer :: ip, jj(space%dim), np
454
455 push_sub_with_profile(mesh_init_stage_3)
456
457 call mpi_grp_init(mesh%mpi_grp, mc%group_comm(p_strategy_domains))
458
459 ! check if we are running in parallel in domains
460 mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)
461
462 call checksum_calculate(1, mesh%np_part_global, mesh%idx%grid_to_spatial_global(1), &
463 mesh%idx%checksum)
464
465 if (mesh%parallel_in_domains) then
466 call do_partition()
467 else
 468 ! When running serially, the local and global point numbers are the same.
469 assert(mesh%np_part_global < huge(mesh%np_part))
470 mesh%np = i8_to_i4(mesh%np_global)
471 mesh%np_part = i8_to_i4(mesh%np_part_global)
472
473 ! These must be initialized for par_vec_gather, par_vec_scatter to work
474 ! as copy operations when running without domain parallelization.
475 mesh%pv%np_global = mesh%np_global
476 mesh%pv%np_ghost = 0
477 mesh%pv%np_bndry = mesh%np_part - mesh%np
478 mesh%pv%npart = 1
479 mesh%pv%xlocal = 1
480 end if
481
482 ! Compute mesh%x
483 safe_allocate(mesh%x(1:mesh%np_part, 1:space%dim))
484 !$omp parallel do
485 do ip = 1, mesh%np_part
486 mesh%x(ip, 1:space%dim) = m_zero
487 end do
488
489 do ip = 1, mesh%np_part
490 mesh%x(ip, 1:space%dim) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
491 end do
492
493 ! save chi, i.e. primitive coordinates
494 safe_allocate(mesh%chi(1:space%dim, 1:mesh%np_part))
495 do ip = 1, mesh%np_part
496 call mesh_local_index_to_coords(mesh, ip, jj)
497 mesh%chi(:, ip) = jj*mesh%spacing
498 end do
499
500 call mesh_get_vol_pp()
501
502 ! save inverse jacobian
503 if (mesh%coord_system%local_basis) then
504 np = mesh%np_part
505 else
506 np = 1
507 end if
508 safe_allocate(mesh%jacobian_inverse(1:space%dim, 1:space%dim, np))
509 do ip = 1, np
510 mesh%jacobian_inverse(:, :, ip) = mesh%coord_system%jacobian_inverse(mesh%chi(:, ip))
511 end do
512
513 pop_sub_with_profile(mesh_init_stage_3)
514
515 contains
516 ! ---------------------------------------------------------
517 subroutine do_partition()
518#ifdef HAVE_MPI
519 integer :: jj, ipart, jpart
520 integer(int64) :: ipg
521 integer, allocatable :: gindex(:), gedges(:)
522 logical, allocatable :: nb(:, :)
523 integer :: idx(space%dim), jx(space%dim)
524 type(mpi_comm) :: graph_comm
525 integer :: iedge, nnb
526 logical :: use_topo, reorder, partition_print
527 integer :: ierr
528
529 logical :: has_virtual_partition = .false.
530 integer :: vsize
531 type(restart_t) :: restart_load, restart_dump
532 integer, allocatable :: part_vec(:)
533
535
536 !Try to load the partition from the restart files
537 if (.not. optional_default(regenerate, .false.)) then
538 call restart_init(restart_load, namespace, restart_partition, restart_type_load, mc, ierr, mesh=mesh, exact=.true.)
539 if (ierr == 0) call mesh_partition_load(restart_load, mesh, ierr)
540 call restart_end(restart_load)
541 else
542 ierr = 0
543 end if
544
545 if (ierr /= 0) then
546
547 !%Variable MeshPartitionVirtualSize
548 !%Type integer
549 !%Default mesh mpi_grp size
550 !%Section Execution::Parallelization
551 !%Description
 552 !% Gives the possibility to generate the mesh partition for a number of nodes
 553 !% different from the one used in the current run. Afterwards, the execution stops.
554 !%End
555 call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)
556
557 if (vsize /= mesh%mpi_grp%size) then
558 write(message(1),'(a,I7)') "Changing the partition size to", vsize
559 write(message(2),'(a)') "The execution will crash."
560 call messages_warning(2, namespace=namespace)
561 has_virtual_partition = .true.
562 else
563 has_virtual_partition = .false.
564 end if
565
566 if (.not. present(parent)) then
567 call mesh_partition(mesh, namespace, space, stencil, vsize)
568 else
569 ! if there is a parent grid, use its partition
570 call mesh_partition_from_parent(mesh, parent)
571 end if
572
573 !Now that we have the partitions, we save them
574 call restart_init(restart_dump, namespace, restart_partition, restart_type_dump, mc, ierr, mesh=mesh)
575 call mesh_partition_dump(restart_dump, mesh, vsize, ierr)
576 call restart_end(restart_dump)
577 end if
578
579 if (has_virtual_partition) then
580 call profiling_end(namespace)
581 call print_date("Calculation ended on ")
582 write(message(1),'(a)') "Execution has ended."
583 write(message(2),'(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
584 call messages_warning(2, namespace=namespace)
585 call messages_end()
586 call global_end()
587 stop
588 end if
589
590 !%Variable MeshUseTopology
591 !%Type logical
592 !%Default false
593 !%Section Execution::Parallelization
594 !%Description
595 !% (experimental) If enabled, <tt>Octopus</tt> will use an MPI virtual
596 !% topology to map the processors. This can improve performance
597 !% for certain interconnection systems.
598 !%End
599 call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)
600
601 if (use_topo) then
 602 ! At the moment we still need the global partition. This will be removed in the near future.
603 safe_allocate(part_vec(1:mesh%np_part_global))
604 call partition_get_global(mesh%partition, part_vec(1:mesh%np_global))
605
606
607 ! generate a table of neighbours
608
609 safe_allocate(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
610 nb = .false.
611
612 do ipg = 1, mesh%np_global
613 ipart = part_vec(ipg)
614 call mesh_global_index_to_coords(mesh, ipg, idx)
615 do jj = 1, stencil%size
616 jx = idx + stencil%points(:, jj)
617 if (all(jx >= mesh%idx%nr(1, :)) .and. all(jx <= mesh%idx%nr(2, :))) then
618 jpart = part_vec(mesh_global_index_from_coords(mesh, jx))
619 if (ipart /= jpart ) nb(ipart, jpart) = .true.
620 end if
621 end do
622 end do
623 safe_deallocate_a(part_vec)
624
625 ! now generate the information of the graph
626
627 safe_allocate(gindex(1:mesh%mpi_grp%size))
628 safe_allocate(gedges(1:count(nb)))
629
630 ! and now generate it
631 iedge = 0
632 do ipart = 1, mesh%mpi_grp%size
633 do jpart = 1, mesh%mpi_grp%size
634 if (nb(ipart, jpart)) then
635 iedge = iedge + 1
636 gedges(iedge) = jpart - 1
637 end if
638 end do
639 gindex(ipart) = iedge
640 end do
641
642 assert(iedge == count(nb))
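      ! gindex and gedges follow the format expected by MPI_Graph_create:
      ! gindex(ipart) holds the cumulative number of edges of partitions
      ! 1..ipart, and gedges lists the neighbouring ranks (0-based) of all
      ! partitions one after another.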
643
644 reorder = .true.
645 call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm, mpi_err)
646
647 ! we have a new communicator
648 call mpi_grp_init(mesh%mpi_grp, graph_comm)
649
650 safe_deallocate_a(nb)
651 safe_deallocate_a(gindex)
652 safe_deallocate_a(gedges)
653
654 end if
655
656 if (optional_default(regenerate, .false.)) call par_vec_end(mesh%pv)
657 call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil,&
658 space, mesh%partition, mesh%pv, namespace)
659
660 ! check the number of ghost neighbours in parallel
661 nnb = 0
662 jpart = mesh%pv%partno
663 do ipart = 1, mesh%pv%npart
664 if (ipart == jpart) cycle
665 if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
666 end do
667 assert(nnb >= 0 .and. nnb < mesh%pv%npart)
668
669 ! Set local point numbers.
670 mesh%np = mesh%pv%np_local
671 mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry
672
673 !%Variable PartitionPrint
674 !%Type logical
675 !%Default true
676 !%Section Execution::Parallelization
677 !%Description
678 !% (experimental) If disabled, <tt>Octopus</tt> will not compute
679 !% nor print the partition information, such as local points,
680 !% no. of neighbours, ghost points and boundary points.
681 !%End
682 call parse_variable(namespace, 'PartitionPrint', .true., partition_print)
683
684 if (partition_print) then
685 call mesh_partition_write_info(mesh, namespace=namespace)
686 call mesh_partition_messages_debug(mesh, namespace)
687 end if
688#endif
689
691 end subroutine do_partition
692
693
 694 ! ---------------------------------------------------------
!> calculate the volume of integration
 696 subroutine mesh_get_vol_pp()
697
698 integer :: ip, np
699
701
702 np = 1
703 if (mesh%use_curvilinear) np = mesh%np_part
704 ! If no local point, we should not try to access the arrays
705 if (mesh%np_part == 0) np = 0
706
707 safe_allocate(mesh%vol_pp(1:np))
708
709 do ip = 1, np
710 mesh%vol_pp(ip) = product(mesh%spacing)
711 end do
712
713 do ip = 1, np
714 mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%jacobian_determinant(mesh%chi(:, ip))
715 end do
716
717 if (mesh%use_curvilinear .or. mesh%np_part == 0) then
718 mesh%volume_element = m_one
719 else
720 mesh%volume_element = mesh%vol_pp(1)
721 end if
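      ! In the curvilinear case the volume per point varies and is carried
      ! entirely by vol_pp, so the scalar volume_element is set to one; for a
      ! uniform mesh vol_pp is constant and volume_element is set to that
      ! constant, vol_pp(1).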
722
724 end subroutine mesh_get_vol_pp
725
726 end subroutine mesh_init_stage_3
727
!> re-distribute the points to improve load balancing
 732 subroutine rebalance_array(data_input, data_output, output_sizes)
733 integer(int64), contiguous, intent(in) :: data_input(:)
734 integer(int64), allocatable, intent(out) :: data_output(:)
735 integer, allocatable, optional, intent(out) :: output_sizes(:)
736
737 integer, allocatable :: initial_sizes(:), final_sizes(:)
738 integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
739 integer, allocatable :: scounts(:), sdispls(:)
740 integer, allocatable :: rcounts(:), rdispls(:)
741 integer :: irank
742 integer(int64) :: itmp
743
744 push_sub(rebalance_array)
745
746 ! collect current sizes of distributed array
747 safe_allocate(initial_sizes(0:mpi_world%size-1))
748 call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
749 safe_allocate(initial_offsets(0:mpi_world%size))
750 initial_offsets(0) = 0
751 do irank = 1, mpi_world%size
752 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
753 end do
754
755 ! now redistribute the arrays
756 ! use block data decomposition of grid indices
757 safe_allocate(final_offsets(0:mpi_world%size))
758 safe_allocate(final_sizes(0:mpi_world%size-1))
759
760 do irank = 0, mpi_world%size
761 final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
762 end do
763 do irank = 0, mpi_world%size - 1
764 assert(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
765 final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)
766 end do
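    ! Example of the target distribution (sizes for illustration only): if three
    ! ranks currently hold 4, 0 and 2 elements, the total of 6 elements is
    ! redistributed with final_offsets = (0, 2, 4, 6), i.e. every rank ends up
    ! with 2 elements.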
767
768 safe_allocate(scounts(0:mpi_world%size-1))
769 safe_allocate(sdispls(0:mpi_world%size-1))
770 safe_allocate(rcounts(0:mpi_world%size-1))
771 safe_allocate(rdispls(0:mpi_world%size-1))
772 ! determine communication pattern
773 scounts = 0
774 do irank = 0, mpi_world%size - 1
775 ! get overlap of initial and final distribution
776 itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
777 max(final_offsets(irank), initial_offsets(mpi_world%rank))
778 assert(itmp < huge(0_int32))
779 if (itmp < 0) then
780 scounts(irank) = 0
781 else
782 scounts(irank) = int(itmp, int32)
783 end if
784 end do
785 sdispls(0) = 0
786 do irank = 1, mpi_world%size - 1
787 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
788 end do
789 assert(sum(int(scounts, int64)) < huge(0_int32))
790 assert(sum(scounts) == initial_sizes(mpi_world%rank))
791
792 rcounts = 0
793 do irank = 0, mpi_world%size - 1
794 ! get overlap of initial and final distribution
795 itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
796 max(final_offsets(mpi_world%rank), initial_offsets(irank))
797 assert(itmp < huge(0_int32))
798 if (itmp < 0) then
799 rcounts(irank) = 0
800 else
801 rcounts(irank) = int(itmp, int32)
802 end if
803 end do
804 rdispls(0) = 0
805 do irank = 1, mpi_world%size - 1
806 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
807 end do
808 ! check for consistency between sending and receiving
809 assert(sum(rcounts) == final_sizes(mpi_world%rank))
810
811 safe_allocate(data_output(1:final_sizes(mpi_world%rank)))
812 call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
813 data_output, rcounts, rdispls, mpi_integer8)
814
815 ! save final sizes of array if optional argument present
816 if (present(output_sizes)) then
817 safe_allocate(output_sizes(0:mpi_world%size-1))
818 output_sizes(:) = final_sizes(:)
819 end if
820
821 safe_deallocate_a(final_offsets)
822 safe_deallocate_a(final_sizes)
823
824 safe_deallocate_a(scounts)
825 safe_deallocate_a(sdispls)
826 safe_deallocate_a(rcounts)
827 safe_deallocate_a(rdispls)
828
829
830 pop_sub(rebalance_array)
831 end subroutine rebalance_array
832
!> reorder the points in the grid according to the variables MeshOrder and MeshLocalOrder
 837 subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
838 type(namespace_t), intent(in) :: namespace
839 class(space_t), intent(in) :: space
840 type(index_t), intent(in) :: idx
841 integer(int64), intent(in) :: grid_to_spatial(:)
842 integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)
843
844 integer :: bsize(space%dim), order, default
845 integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
846 type(block_t) :: blk
847 integer, parameter :: &
 848 order_blocks = 1, &
849 order_original = 2, &
850 order_cube = 3
851 integer :: point(1:space%dim)
852 integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
853 integer, allocatable :: index_map(:), indices(:)
854 integer(int64), allocatable :: grid_to_spatial_recv(:)
855 integer, allocatable :: initial_sizes(:)
856 integer(int64), allocatable :: initial_offsets(:)
857 integer(int64) :: istart, iend, indstart, indend, spatial_size
858 integer :: irank, local_size, num_recv
859 integer :: iunique, nunique
860 integer :: direction
861 logical :: increase_with_dimension
862
863 integer, allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
864 integer(int64), allocatable :: spatial_cutoff(:)
865
866 push_sub(reorder_points)
867
868 !%Variable MeshOrder
869 !%Type integer
870 !%Section Execution::Optimization
871 !%Description
872 !% This variable controls how the grid points are mapped to a
873 !% linear array for global arrays. For runs that are parallel
874 !% in domains, the local mesh order may be different (see
875 !% <tt>MeshLocalOrder</tt>).
876 !% The default is blocks when serial in domains and cube when
877 !% parallel in domains with the local mesh order set to blocks.
878 !%Option order_blocks 1
879 !% The grid is mapped using small parallelepipedic grids. The size
880 !% of the blocks is controlled by <tt>MeshBlockSize</tt>.
881 !%Option order_original 2
882 !% The original order of the indices is used to map the grid.
883 !%Option order_cube 3
884 !% The grid is mapped using a full cube, i.e. without blocking.
885 !%End
886 default = order_blocks
887 call parse_variable(namespace, 'MeshOrder', default, order)
888 ! no reordering in 1D necessary
889 if (space%dim == 1) then
890 order = order_original
891 end if
892
893 !%Variable MeshBlockDirection
894 !%Type integer
895 !%Section Execution::Optimization
896 !%Description
897 !% Determines the direction in which the dimensions are chosen to compute
898 !% the blocked index for sorting the mesh points (see MeshBlockSize).
 899 !% The default is increase_with_dimension, corresponding to xyz ordering
900 !% in 3D.
901 !%Option increase_with_dimension 1
902 !% The fastest changing index is in the first dimension, i.e., in 3D this
903 !% corresponds to ordering in xyz directions.
904 !%Option decrease_with_dimension 2
905 !% The fastest changing index is in the last dimension, i.e., in 3D this
906 !% corresponds to ordering in zyx directions.
907 !%End
908 call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
909 increase_with_dimension = direction == 1
910 if (direction /= 1 .and. direction /= 2) then
911 call messages_input_error(namespace, 'MeshBlockDirection')
912 end if
913
914 select case (order)
915 case (order_original)
916 ! only copy points, they stay in their original ordering
917 safe_allocate(grid_to_spatial_reordered(1:size(grid_to_spatial)))
918 grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))
919 case (order_blocks, order_cube)
920 if (order == order_cube) then
921 bsize = idx%nr(2, :) - idx%nr(1, :) + 1
922 else
923 !%Variable MeshBlockSize
924 !%Type block
925 !%Section Execution::Optimization
926 !%Description
927 !% To improve memory-access locality when calculating derivatives,
928 !% <tt>Octopus</tt> arranges mesh points in blocks. This variable
 929 !% controls the size of these blocks in the different
930 !% directions. The default is selected according to the value of
931 !% the <tt>StatesBlockSize</tt> variable. (This variable only affects the
932 !% performance of <tt>Octopus</tt> and not the results.)
933 !%End
934 if (conf%target_states_block_size < 16) then
935 bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
936 if (space%dim > 1) bsize(2) = 4
937 if (space%dim > 2) bsize(3:) = 10
938 else
939 bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
940 if (space%dim > 1) bsize(2) = 15
941 if (space%dim > 2) bsize(3:) = 15
942 end if
943
944 if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
945 nn = parse_block_cols(blk, 0)
946 if (nn /= space%dim) then
947 message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."
948 call messages_fatal(1, namespace=namespace)
949 end if
950 do idir = 1, nn
951 call parse_block_integer(blk, 0, idir - 1, bsize(idir))
952 end do
953 end if
954 end if
955
956 number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1
957
958
959 ! do the global reordering in parallel, use block data decomposition of global indices
960 ! reorder indices along blocked parallelepiped curve
961
962 ! collect current sizes of distributed array
963 safe_allocate(initial_sizes(0:mpi_world%size-1))
964 call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
965 safe_allocate(initial_offsets(0:mpi_world%size))
966 initial_offsets(0) = 0
967 do irank = 1, mpi_world%size
968 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
969 end do
970
971 ! get local range and size
972 istart = initial_offsets(mpi_world%rank)
973 iend = initial_offsets(mpi_world%rank + 1) - 1
974 assert(iend - istart + 1 < huge(0_int32))
975 local_size = int(iend - istart + 1, int32)
976 assert(local_size == initial_sizes(mpi_world%rank))
977
978 ! compute new indices locally
979 safe_allocate(reorder_indices(1:local_size))
980 safe_allocate(indices(1:local_size))
981 safe_allocate(grid_to_spatial_reordered(1:local_size))
982 !$omp parallel do private(point)
983 do ip = 1, local_size
984 call index_spatial_to_point(idx, space%dim, grid_to_spatial(ip), point)
985 point = point + idx%offset
986 reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)
987 end do
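      ! The blocked index groups the mesh points into parallelepipedic bricks of
      ! size bsize: all points of one brick are numbered consecutively before
      ! moving to the next brick, which keeps points that are close in space
      ! also close in memory. For order_cube the brick spans the whole grid, so
      ! the points are simply ordered lexicographically over the full cube.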
988 ! parallel sort according to the new indices
989 ! sort the local array
990 call sort(reorder_indices, indices)
991 ! save reordered indices to send to other processes
992 !$omp parallel do
993 do ip = 1, local_size
994 grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))
995 end do
996
997 ! get minimum and maximum
998 indstart = reorder_indices(1)
999 indend = reorder_indices(local_size)
1000 call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
1001 call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
1002 spatial_size = indend - indstart + 1
1003
1004 ! get index ranges for each rank
1005 safe_allocate(spatial_cutoff(0:mpi_world%size-1))
1006 do irank = 0, mpi_world%size - 1
1007 spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart
1008 end do
1009
1010 safe_allocate(scounts(0:mpi_world%size-1))
1011 safe_allocate(sdispls(0:mpi_world%size-1))
1012 safe_allocate(rcounts(0:mpi_world%size-1))
1013 safe_allocate(rdispls(0:mpi_world%size-1))
1014 ! get send counts
1015 scounts = 0
1016 irank = 0
 1017 ! the indices are sorted, so we can sweep through them and advance the
 1018 ! destination rank whenever we cross a cutoff
1019 do ip = 1, local_size
1020 if (reorder_indices(ip) >= spatial_cutoff(irank)) then
1021 ! this do loop is needed in case some ranks do not have any points
1022 do while (reorder_indices(ip) >= spatial_cutoff(irank))
1023 irank = irank + 1
1024 end do
1025 assert(irank < mpi_world%size)
1026 end if
1027 scounts(irank) = scounts(irank) + 1
1028 end do
1029 safe_deallocate_a(spatial_cutoff)
1030 assert(sum(scounts) == local_size)
1031
1032 ! compute communication pattern (sdispls, rcounts, rdispls)
1033 sdispls(0) = 0
1034 do irank = 1, mpi_world%size - 1
1035 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
1036 end do
1037
1038 call mpi_world%alltoall(scounts, 1, mpi_integer, &
1039 rcounts, 1, mpi_integer)
1040
1041 rdispls(0) = 0
1042 do irank = 1, mpi_world%size - 1
1043 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
1044 end do
1045
1046 ! make sure the arrays get allocated also if we do not receive anything
1047 num_recv = max(sum(rcounts), 1)
1048 ! communicate the locally sorted indices
1049 safe_allocate(reorder_recv(1:num_recv))
1050 call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
1051 reorder_recv, rcounts, rdispls, mpi_integer8)
1052 safe_deallocate_a(reorder_indices)
1053
1054 ! communicate the corresponding spatial indices
1055 safe_allocate(grid_to_spatial_recv(1:num_recv))
1056 call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
1057 grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
1058 safe_deallocate_a(grid_to_spatial_reordered)
1059
1060 ! do k-way merge of sorted indices
1061 safe_allocate(reorder_indices(1:num_recv))
1062 safe_allocate(index_map(1:num_recv))
1063 if (sum(rcounts) > 0) then
1064 call merge_sorted_arrays(reorder_recv, rcounts, reorder_indices, index_map)
1065
1066 ! get number of unique indices, needed for boundary
1067 nunique = 1
1068 do ipg = 2, sum(rcounts)
1069 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1070 nunique = nunique + 1
1071 end if
1072 end do
1073
1074 ! reorder according to new order, but remove duplicate entries
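      ! (duplicates can occur only for the boundary points: each rank detects
      ! the boundary of its own portion of the grid independently, so the same
      ! boundary point may arrive here from several ranks)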
1075 safe_allocate(grid_to_spatial_reordered(1:nunique))
1076 iunique = 1
1077 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
1078 do ipg = 2, sum(rcounts)
1079 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1080 iunique = iunique + 1
1081 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
1082 end if
1083 end do
1084 else
1085 safe_allocate(grid_to_spatial_reordered(1:0))
1086 end if
1087
1088 safe_deallocate_a(initial_offsets)
1089 safe_deallocate_a(initial_sizes)
1090
1091 safe_deallocate_a(reorder_indices)
1092 safe_deallocate_a(reorder_recv)
1093
1094 safe_deallocate_a(grid_to_spatial_recv)
1095 safe_deallocate_a(index_map)
1096 safe_deallocate_a(indices)
1097
1098 safe_deallocate_a(scounts)
1099 safe_deallocate_a(sdispls)
1100 safe_deallocate_a(rcounts)
1101 safe_deallocate_a(rdispls)
1102
1103 end select
1104 pop_sub(reorder_points)
1105 end subroutine reorder_points
1106
!> return the sizes and offsets of a distributed array for all tasks of a mpi group
 1109 subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)
1110 integer, intent(in) :: local_size
1111 integer, allocatable, intent(out) :: sizes(:)
1112 integer(int64), allocatable, intent(out) :: offsets(:)
1113 type(mpi_grp_t), optional, intent(in) :: mpi_grp
1114
1115 integer :: irank
1116 type(mpi_grp_t) :: mpi_grp_
1117
1118 push_sub(get_sizes_offsets)
1119
1120 if (present(mpi_grp)) then
1121 mpi_grp_ = mpi_grp
1122 else
1123 mpi_grp_ = mpi_world
1124 end if
1125
1126 safe_allocate(sizes(0:mpi_grp_%size-1))
1127 call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)
1128 safe_allocate(offsets(0:mpi_grp_%size))
1129 offsets(0) = 0
1130 do irank = 1, mpi_grp_%size
1131 offsets(irank) = offsets(irank-1) + sizes(irank-1)
1132 end do
1133
1134 pop_sub(get_sizes_offsets)
1135 end subroutine get_sizes_offsets
1136
1137end module mesh_init_oct_m
1138
1139!! Local Variables:
1140!! mode: f90
1141!! coding: utf-8
1142!! End: