Octopus
mesh_init.F90
1!! Copyright (C) 2002-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
2!! Copyright (C) 2021 S. Ohlmann
3!!
4!! This program is free software; you can redistribute it and/or modify
5!! it under the terms of the GNU General Public License as published by
6!! the Free Software Foundation; either version 2, or (at your option)
7!! any later version.
8!!
9!! This program is distributed in the hope that it will be useful,
10!! but WITHOUT ANY WARRANTY; without even the implied warranty of
11!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12!! GNU General Public License for more details.
13!!
14!! You should have received a copy of the GNU General Public License
15!! along with this program; if not, write to the Free Software
16!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17!! 02110-1301, USA.
18!!
19
20#include "global.h"
21
22!> This module contains subroutines related to
23!! the initialization of the mesh.
24module mesh_init_oct_m
25 use affine_coordinates_oct_m
26 use box_oct_m
27 use calc_mode_par_oct_m
28 use checksum_interface_oct_m
29 use coordinate_system_oct_m
30 use debug_oct_m
31 use global_oct_m
32 use iihash_oct_m
33 use index_oct_m
34 use math_oct_m
36 use mesh_oct_m
37 use mesh_partition_oct_m
38 use messages_oct_m
40 use mpi_oct_m
41 use mpi_lib_oct_m
42 use multicomm_oct_m
43 use namespace_oct_m
44 use par_vec_oct_m
45 use parser_oct_m
46 use partition_oct_m
47 use profiling_oct_m
48 use restart_oct_m
49 use sort_oct_m
50 use space_oct_m
51 use stencil_oct_m
52 use utils_oct_m
53
54 implicit none
55
56 private
57 public :: &
58 mesh_init_stage_1, &
59 mesh_init_stage_2, &
60 mesh_init_stage_3
61
62
63 integer, parameter :: INNER_POINT = 1
64 integer, parameter :: ENLARGEMENT_POINT = 2
65 integer, parameter :: BOUNDARY = -1
66
67contains
68
69! ---------------------------------------------------------
70!> First stage mesh initialization.
76 subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
77 class(mesh_t), intent(inout) :: mesh
78 type(namespace_t), intent(in) :: namespace
79 class(space_t), intent(in) :: space
80 class(box_t), target, intent(in) :: box
81 class(coordinate_system_t), target, intent(in) :: coord_system
82 real(real64), intent(in) :: spacing(1:space%dim)
83 integer, intent(in) :: enlarge(1:space%dim)
84
85 integer :: idir, jj, delta
86 real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
87 logical :: out
88
89 push_sub_with_profile(mesh_init_stage_1)
90
91 mesh%box => box
92
93 safe_allocate(mesh%spacing(1:space%dim))
94 mesh%spacing = spacing ! this number can change in the following
95
96 mesh%use_curvilinear = coord_system%local_basis
97 mesh%coord_system => coord_system
98
99 call index_init(mesh%idx, space%dim)
100 mesh%idx%enlarge = enlarge
101
102 ! get box bounds along the axes that generate the grid points
103 select type (coord_system)
104 class is (affine_coordinates_t)
105 box_bounds = box%bounds(coord_system%basis)
106 class default
107 box_bounds = box%bounds()
108 end select
109
110 ! adjust nr
111 do idir = 1, space%dim
112 chi = m_zero
113 ! the upper border
114 jj = 0
115 out = .false.
116 do while(.not.out)
117 jj = jj + 1
118 chi(idir) = real(jj, real64) * mesh%spacing(idir)
119 if (mesh%use_curvilinear) then
120 x = coord_system%to_cartesian(chi)
121 out = x(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
122 else
123 ! do the same comparison here as in box%contains_point
124 out = chi(idir) > maxval(abs(box_bounds(:, idir))) + box_boundary_delta
125 end if
126 end do
127 mesh%idx%nr(2, idir) = jj - 1
128 end do
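! nr(2, idir) now holds the index of the last grid point along the positive
! idir axis whose coordinate still lies within the box bounds (up to
! box_boundary_delta).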
129
130 ! we have a symmetric mesh (for now)
131 mesh%idx%nr(1,:) = -mesh%idx%nr(2,:)
132
133 ! we have to adjust a couple of things for the periodic directions
134 do idir = 1, space%periodic_dim
135 if (mesh%idx%nr(2, idir) == 0) then
136 ! this happens if Spacing > box size
137 mesh%idx%nr(2, idir) = 1
138 mesh%idx%nr(1, idir) = -1
139 end if
140
141 ! We have to adjust the spacing to be commensurate with the box,
142 ! for this we scan the possible values of the grid size around the
143 ! one we selected. We choose the size that has the spacing closest
144 ! to the requested one.
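! For example, for a periodic direction of length 10 with a requested spacing
! of 0.3, the first pass above gives nr(2, idir) = 16, so the candidate
! spacings are 10/34, 10/33 and 10/32; 10/33 ~ 0.303 is the closest to the
! request, hence delta = 0 is chosen and the spacing becomes 0.303.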
145 do delta = -1, 1
146 spacing_new(delta) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
147 spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
148 end do
149
150 delta = minloc(spacing_new, dim = 1) - 2
151
152 assert(delta >= -1)
153 assert(delta <= 1)
154
155 mesh%spacing(idir) = m_two*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
156
157 ! we need to adjust the grid by adding or removing one point
158 if (delta == -1) then
159 mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
160 else if (delta == 1) then
161 mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
162 end if
163
164 end do
165
166 if ( any(abs(mesh%spacing - spacing) > 1.e-6_real64) ) then
167 call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
168 call messages_warning()
169 end if
170
171 do idir = space%periodic_dim + 1, space%dim
172 if (mesh%idx%nr(2, idir) == 0) then
173 write(message(1),'(a,i2)') 'Spacing > box size in direction ', idir
174 call messages_fatal(1, namespace=namespace)
175 end if
176 end do
177
178 mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
179 ! compute strides for cubic indices
180 mesh%idx%stride(:) = 1
181 do idir = 2, space%dim+1
182 mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
183 (mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
184 end do
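! For example, for ll + 2*enlarge = (/29, 29, 29/) the strides are
! (/1, 29, 841, 24389/); they are used to fold the integer coordinates of a
! point into a single cubic index.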
185
186 pop_sub_with_profile(mesh_init_stage_1)
187 end subroutine mesh_init_stage_1
188
189 ! ---------------------------------------------------------
190 !> This subroutine creates the global array of spatial indices
191 !! and the inverse mapping.
195 !
196 subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
197 class(mesh_t), intent(inout) :: mesh
198 type(namespace_t), intent(in) :: namespace
199 class(space_t), intent(in) :: space
200 class(box_t), intent(in) :: box
201 type(stencil_t), intent(in) :: stencil
202 logical, optional, intent(in) :: regenerate
203
204 integer :: is
205 real(real64) :: chi(1:space%dim)
206 real(real64) :: pos(space%dim)
207 integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
208 integer(int64) :: global_size
209 integer(int32) :: local_size
210 integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
211 integer :: ip, ib, ib2, np, np_boundary, ii
212 logical :: found
213 type(lihash_t) :: spatial_to_boundary
214 integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
215 integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
216 integer(int64), allocatable :: spatial_to_grid(:)
217 integer, allocatable :: sizes(:)
218 integer(int64), allocatable :: offsets(:)
219 integer :: size_boundary
220#ifdef HAVE_MPI
221 integer(int64), pointer :: ptr(:)
222 type(mpi_grp_t) :: internode_grp, intranode_grp
223#endif
224
225 push_sub_with_profile(mesh_init_stage_2)
226
227 if (.not. optional_default(regenerate, .false.)) then
228 ! enlarge mesh for boundary points
229 mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
230 mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
231 end if
232
233 !%Variable MeshIndexType
234 !%Type integer
235 !%Default idx_cubic
236 !%Section Mesh
237 !%Description
238 !% Determines the index type. It must be the same when restarting a calculation.
239 !%Option idx_cubic 1
240 !% Cubic indices are used to map the spatial information to the grid points.
241 !%Option idx_hilbert 2
242 !% A Hilbert space-filling curve is used to map the spatial information to
243 !% the grid points.
244 !%End
245 call parse_variable(namespace, 'MeshIndexType', idx_cubic, mesh%idx%type)
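! For example, 'MeshIndexType = idx_hilbert' in the input file selects the
! Hilbert-curve mapping; the same index type must be kept when restarting.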
246
247 grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
248 mesh%idx%offset = grid_sizes/2
249 if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
250 write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
251 " points in one direction for ", space%dim, " dimensions. This is not supported."
252 call messages_fatal(1, namespace=namespace)
253 end if
254 global_size = product(int(grid_sizes, int64))
255 ! compute the bits per dimension: grid_sizes(i) <= 2**bits
256 mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64) )/log(2.)))
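! For example, a direction with 27 points requires bits = 5, since 2^5 = 32
! is the smallest power of two that can hold 27 values.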
257
258 if (mesh%idx%type == idx_cubic) then
259 spatial_size = global_size
260 else if (mesh%idx%type == idx_hilbert) then
261 spatial_size = 2**(space%dim*mesh%idx%bits)
262 end if
263
264 ! use block data decomposition of spatial indices
265 istart = spatial_size * mpi_world%rank/mpi_world%size
266 iend = spatial_size * (mpi_world%rank+1)/mpi_world%size - 1
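! For example, spatial_size = 10 distributed over 4 ranks gives the index
! ranges [0,1], [2,4], [5,6] and [7,9].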
267 if (.not. (iend - istart + 1 < huge(0_int32))) then
268 write(message(1), '(A, I10, A, I2, A)') "Error: local grid too large, more than ", &
269 huge(0_int32), " points. This is not supported. Maybe use more MPI ranks?"
270 call messages_fatal(1, namespace=namespace)
271 end if
272 local_size = int(iend - istart + 1, int32)
273
274 safe_allocate(grid_to_spatial_initial(1:local_size))
275
276 ! get inner grid indices
277 ip = 1
278 do ispatial = istart, iend
279 call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
280 ! first check if point is outside bounding box
281 if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
282 if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle
283 ! then check if point is inside simulation box
284 chi = real(point, real64) * mesh%spacing
285 pos = mesh%coord_system%to_cartesian(chi)
286 if (.not. box%contains_point(pos)) cycle
287 grid_to_spatial_initial(ip) = ispatial
288 assert(ip + 1 < huge(ip))
289 ip = ip + 1
290 end do
291 np = ip - 1
292
293 call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
294 np = sizes(mpi_world%rank)
295
296 safe_deallocate_a(grid_to_spatial_initial)
297
298 safe_allocate(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
299 safe_deallocate_a(sizes)
300
301 !$omp parallel do
302 do ispatial = grid_to_spatial(1), grid_to_spatial(np)
303 spatial_to_grid(ispatial) = -1
304 end do
305 !$omp parallel do
306 do ip = 1, np
307 spatial_to_grid(grid_to_spatial(ip)) = ip
308 end do
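! spatial_to_grid is now the inverse of grid_to_spatial on the local range:
! it returns the local inner-point index for a given spatial index, or -1 if
! no local inner point corresponds to that spatial index.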
309
310 ! get local boundary indices
311 call lihash_init(spatial_to_boundary)
312 size_boundary = np
313 safe_allocate(boundary_to_spatial(1:size_boundary))
314 ib = 1
315 do ip = 1, np
316 call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
317 do is = 1, stencil%size
318 if (stencil%center == is) cycle
319 point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)
320 ! check if point is in inner part
321 call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
322 assert(ispatialb >= 0)
323 if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
324 ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
325 if (spatial_to_grid(ispatialb) > 0) cycle
326 end if
327 ! then check if point is inside simulation box
328 chi = real(point_stencil, real64) * mesh%spacing
329 pos = mesh%coord_system%to_cartesian(chi)
330 if (box%contains_point(pos)) cycle
331 ! it has to be a boundary point now
332 ! check if already counted
333 ib2 = lihash_lookup(spatial_to_boundary, ispatialb, found)
334 if (found) cycle
335 boundary_to_spatial(ib) = ispatialb
336 call lihash_insert(spatial_to_boundary, ispatialb, ib)
337 ib = ib + 1
338 ! enlarge array
339 if (ib >= size_boundary) then
340 size_boundary = size_boundary * 2
341 call make_array_larger(boundary_to_spatial, size_boundary)
342 end if
343 end do
344 end do
345 np_boundary = ib - 1
346 call lihash_end(spatial_to_boundary)
347 safe_deallocate_a(spatial_to_grid)
348
349 ! reorder inner points
350 call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
351 safe_deallocate_a(grid_to_spatial)
352
353 call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
354 np = sizes(mpi_world%rank)
355 mesh%np_global = sizes(0)
356 do ii = 1, mpi_world%size - 1
357 mesh%np_global = mesh%np_global + sizes(ii)
358 end do
359 safe_deallocate_a(sizes)
360 safe_deallocate_a(grid_to_spatial_reordered)
361
362 ! reorder boundary points
363 call make_array_larger(boundary_to_spatial, np_boundary)
364 call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
365 safe_deallocate_a(boundary_to_spatial)
366
367 call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
368 safe_deallocate_a(boundary_to_spatial_reordered)
369
370 ! global grid size
371 np_boundary = sizes(mpi_world%rank)
372 mesh%np_part_global = mesh%np_global + sizes(0)
373 do ii = 1, mpi_world%size - 1
374 mesh%np_part_global = mesh%np_part_global + sizes(ii)
375 end do
376 safe_deallocate_a(sizes)
377
378
379 ! get global indices
380#ifdef HAVE_MPI
381 ! create shared memory window and fill it only on root
382 call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
383 call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
384 mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
385#else
386 safe_allocate(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
387#endif
388 ! inner grid
389 call get_sizes_offsets(np, sizes, offsets)
390 call mpi_world%gatherv(grid_to_spatial, np, mpi_integer8, &
391 mesh%idx%grid_to_spatial_global, sizes, offsets, mpi_integer8, 0)
392
393 ! boundary indices
394 call get_sizes_offsets(np_boundary, sizes, offsets)
395 call mpi_world%gatherv(boundary_to_spatial, np_boundary, mpi_integer8, &
396 mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, mpi_integer8, 0)
397
398 ! fill global hash map
399#ifdef HAVE_MPI
400 ! create shared memory window and fill it only on root
401 call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
402 mesh%idx%window_spatial_to_grid, ptr)
403 mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
404#else
405 safe_allocate(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
406#endif
407 if (mpi_world%is_root()) then
408 ! fill only on root, then broadcast
409 !$omp parallel do
410 do ispatial = 0, spatial_size-1
411 mesh%idx%spatial_to_grid_global(ispatial) = 0
412 end do
413 !$omp parallel do
414 do ipg = 1, mesh%np_part_global
415 mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
416 end do
417 end if
418
419#ifdef HAVE_MPI
420 ! now broadcast the global arrays to local rank 0 on each node
421 if (intranode_grp%is_root()) then
422 call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, mpi_integer8, 0)
423 call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, mpi_integer8, 0)
424 end if
425 call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
426 call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
427#endif
428
429 safe_deallocate_a(offsets)
430 safe_deallocate_a(sizes)
431
432 safe_deallocate_a(boundary_to_spatial)
433 safe_deallocate_a(grid_to_spatial)
434
435 pop_sub_with_profile(mesh_init_stage_2)
436 end subroutine mesh_init_stage_2
437
438! ---------------------------------------------------------
439!> When running parallel in domains, stencil and np_stencil are needed
440!! to compute the ghost points.
443! ---------------------------------------------------------
444 subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
445 class(mesh_t), intent(inout) :: mesh
446 type(namespace_t), intent(in) :: namespace
447 class(space_t), intent(in) :: space
448 type(stencil_t), intent(in) :: stencil
449 type(multicomm_t), intent(in) :: mc
450 type(mesh_t), optional, intent(in) :: parent
451 logical, optional, intent(in) :: regenerate
452
453 integer :: ip, jj(space%dim), np
454
455 push_sub_with_profile(mesh_init_stage_3)
456
457 call mpi_grp_init(mesh%mpi_grp, mc%group_comm(p_strategy_domains))
458
459 ! check if we are running in parallel in domains
460 mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)
461
462 call checksum_calculate(1, mesh%np_part_global, mesh%idx%grid_to_spatial_global(1), &
463 mesh%idx%checksum)
464
465 if (mesh%parallel_in_domains) then
466 call do_partition()
467 else
468 ! When running serially, the local and global point numbers are the same.
469 assert(mesh%np_part_global < huge(mesh%np_part))
470 mesh%np = i8_to_i4(mesh%np_global)
471 mesh%np_part = i8_to_i4(mesh%np_part_global)
472
473 ! These must be initialized for par_vec_gather, par_vec_scatter to work
474 ! as copy operations when running without domain parallelization.
475 mesh%pv%np_global = mesh%np_global
476 mesh%pv%np_ghost = 0
477 mesh%pv%np_bndry = mesh%np_part - mesh%np
478 mesh%pv%npart = 1
479 mesh%pv%xlocal = 1
480 end if
481
482 ! Compute mesh%x
483 safe_allocate(mesh%x(1:space%dim, 1:mesh%np_part))
484 safe_allocate(mesh%x_t(1:mesh%np_part, 1:space%dim))
485 do ip = 1, mesh%np_part
486 mesh%x(1:space%dim, ip) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
487 mesh%x_t(ip, 1:space%dim) = mesh%x(1:space%dim, ip)
488 end do
489
490 ! save chi, i.e. primitive coordinates
491 safe_allocate(mesh%chi(1:space%dim, 1:mesh%np_part))
492 do ip = 1, mesh%np_part
493 call mesh_local_index_to_coords(mesh, ip, jj)
494 mesh%chi(:, ip) = jj*mesh%spacing
495 end do
496
497 call mesh_get_vol_pp()
498
499 ! save inverse jacobian
500 if (mesh%coord_system%local_basis) then
501 np = mesh%np_part
502 else
503 np = 1
504 end if
505 safe_allocate(mesh%jacobian_inverse(1:space%dim, 1:space%dim, np))
506 do ip = 1, np
507 mesh%jacobian_inverse(:, :, ip) = mesh%coord_system%jacobian_inverse(mesh%chi(:, ip))
508 end do
509
510 pop_sub_with_profile(mesh_init_stage_3)
511
512 contains
513 ! ---------------------------------------------------------
514 subroutine do_partition()
515#ifdef HAVE_MPI
516 integer :: jj, ipart, jpart
517 integer(int64) :: ipg, jpg
518 integer, allocatable :: gindex(:), gedges(:)
519 logical, allocatable :: nb(:, :)
520 integer :: idx(space%dim), jx(space%dim)
521 type(mpi_comm) :: graph_comm
522 integer :: iedge, nnb
523 logical :: use_topo, reorder, partition_print
524 integer :: ierr
525
526 logical :: has_virtual_partition = .false.
527 integer :: vsize
528 type(restart_t) :: restart_load, restart_dump
529 integer, allocatable :: part_vec(:)
530
532
533 !Try to load the partition from the restart files
534 if (.not. optional_default(regenerate, .false.)) then
535 call restart_load%init(namespace, restart_partition, restart_type_load, mc, ierr, mesh=mesh, exact=.true.)
536 if (ierr == 0) call mesh_partition_load(restart_load, mesh, ierr)
537 call restart_load%end()
538 else
539 ierr = 0
540 end if
541
542 if (ierr /= 0) then
543
544 !%Variable MeshPartitionVirtualSize
545 !%Type integer
546 !%Default mesh mpi_grp size
547 !%Section Execution::Parallelization
548 !%Description
549 !% Allows one to generate and save the mesh partition for a different
550 !% number of processes than the current run uses; the execution then stops.
551 !%End
552 call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)
553
554 if (vsize /= mesh%mpi_grp%size) then
555 write(message(1),'(a,I7)') "Changing the partition size to", vsize
556 write(message(2),'(a)') "The execution will crash."
557 call messages_warning(2, namespace=namespace)
558 has_virtual_partition = .true.
559 else
560 has_virtual_partition = .false.
561 end if
562
563 if (.not. present(parent)) then
564 call mesh_partition(mesh, namespace, space, stencil, vsize)
565 else
566 ! if there is a parent grid, use its partition
567 call mesh_partition_from_parent(mesh, parent)
568 end if
569
570 !Now that we have the partitions, we save them
571 call restart_dump%init(namespace, restart_partition, restart_type_dump, mc, ierr, mesh=mesh)
572 call mesh_partition_dump(restart_dump, mesh, vsize, ierr)
573 call restart_dump%end()
574 end if
575
576 if (has_virtual_partition) then
577 call profiling_end(namespace)
578 call print_date("Calculation ended on ")
579 write(message(1),'(a)') "Execution has ended."
580 write(message(2),'(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
581 call messages_warning(2, namespace=namespace)
582 call messages_end()
583 call global_end()
584 stop
585 end if
586
587 !%Variable MeshUseTopology
588 !%Type logical
589 !%Default false
590 !%Section Execution::Parallelization
591 !%Description
592 !% (experimental) If enabled, <tt>Octopus</tt> will use an MPI virtual
593 !% topology to map the processors. This can improve performance
594 !% for certain interconnection systems.
595 !%End
596 call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)
597
598 if (use_topo) then
599 ! At the moment we still need the global partition. This will be removed in the near future.
600 safe_allocate(part_vec(1:mesh%np_global))
601 call partition_get_global(mesh%partition, part_vec(1:mesh%np_global))
602
603
604 ! generate a table of neighbours
605
606 safe_allocate(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
607 nb = .false.
608
609 do ipg = 1, mesh%np_global
610 ipart = part_vec(ipg)
611 call mesh_global_index_to_coords(mesh, ipg, idx)
612 do jj = 1, stencil%size
613 jx = idx + stencil%points(:, jj)
614 jpg = mesh_global_index_from_coords(mesh, jx)
615 if (jpg > 0 .and. jpg <= mesh%np_global) then
616 jpart = part_vec(jpg)
617 if (ipart /= jpart ) nb(ipart, jpart) = .true.
618 end if
619 end do
620 end do
621 safe_deallocate_a(part_vec)
622
623 ! now generate the information of the graph
624
625 safe_allocate(gindex(1:mesh%mpi_grp%size))
626 safe_allocate(gedges(1:count(nb)))
627
628 ! and now generate it
629 iedge = 0
630 do ipart = 1, mesh%mpi_grp%size
631 do jpart = 1, mesh%mpi_grp%size
632 if (nb(ipart, jpart)) then
633 iedge = iedge + 1
634 gedges(iedge) = jpart - 1
635 end if
636 end do
637 gindex(ipart) = iedge
638 end do
639
640 assert(iedge == count(nb))
641
642 reorder = .true.
643 call mpi_graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm)
644
645 ! we have a new communicator
646 call mpi_grp_init(mesh%mpi_grp, graph_comm)
647
648 safe_deallocate_a(nb)
649 safe_deallocate_a(gindex)
650 safe_deallocate_a(gedges)
651
652 end if
653
654 if (optional_default(regenerate, .false.)) call par_vec_end(mesh%pv)
655 call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil,&
656 space, mesh%partition, mesh%pv, namespace)
657
658 ! check the number of ghost neighbours in parallel
659 nnb = 0
660 jpart = mesh%pv%partno
661 do ipart = 1, mesh%pv%npart
662 if (ipart == jpart) cycle
663 if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
664 end do
665 assert(nnb >= 0 .and. nnb < mesh%pv%npart)
666
667 ! Set local point numbers.
668 mesh%np = mesh%pv%np_local
669 mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry
670
671 !%Variable PartitionPrint
672 !%Type logical
673 !%Default true
674 !%Section Execution::Parallelization
675 !%Description
676 !% (experimental) If disabled, <tt>Octopus</tt> will not compute
677 !% or print the partition information, such as local points,
678 !% number of neighbours, ghost points, and boundary points.
679 !%End
680 call parse_variable(namespace, 'PartitionPrint', .true., partition_print)
681
682 if (partition_print) then
683 call mesh_partition_write_info(mesh, namespace=namespace)
684 call mesh_partition_messages_debug(mesh, namespace)
685 end if
686#endif
687
689 end subroutine do_partition
690
691
692 ! ---------------------------------------------------------
693 !> calculate the volume of integration
694 subroutine mesh_get_vol_pp()
695
696 integer :: ip, np
697
698 push_sub(mesh_init_stage_3.mesh_get_vol_pp)
699
700 np = 1
701 if (mesh%use_curvilinear) np = mesh%np_part
702 ! If there are no local points, we should not try to access the arrays
703 if (mesh%np_part == 0) np = 0
704
705 safe_allocate(mesh%vol_pp(1:np))
706
707 do ip = 1, np
708 mesh%vol_pp(ip) = product(mesh%spacing)
709 end do
710
711 do ip = 1, np
712 mesh%vol_pp(ip) = mesh%vol_pp(ip)*mesh%coord_system%jacobian_determinant(mesh%chi(:, ip))
713 end do
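! vol_pp(ip) is thus the local volume element: the product of the spacings
! times the Jacobian determinant of the coordinate transformation at that point.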
714
715 if (mesh%use_curvilinear .or. mesh%np_part == 0) then
716 mesh%volume_element = m_one
717 else
718 mesh%volume_element = mesh%vol_pp(1)
719 end if
720
721 pop_sub(mesh_init_stage_3.mesh_get_vol_pp)
722 end subroutine mesh_get_vol_pp
723
724 end subroutine mesh_init_stage_3
725
726 !> re-distribute the points to improve load balancing
730 subroutine rebalance_array(data_input, data_output, output_sizes)
731 integer(int64), contiguous, intent(in) :: data_input(:)
732 integer(int64), allocatable, intent(out) :: data_output(:)
733 integer, allocatable, optional, intent(out) :: output_sizes(:)
734
735 integer, allocatable :: initial_sizes(:), final_sizes(:)
736 integer(int64), allocatable :: initial_offsets(:), final_offsets(:)
737 integer, allocatable :: scounts(:), sdispls(:)
738 integer, allocatable :: rcounts(:), rdispls(:)
739 integer :: irank
740 integer(int64) :: itmp
741
742 push_sub(rebalance_array)
743
744 ! collect current sizes of distributed array
745 safe_allocate(initial_sizes(0:mpi_world%size-1))
746 call mpi_world%allgather(size(data_input), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
747 safe_allocate(initial_offsets(0:mpi_world%size))
748 initial_offsets(0) = 0
749 do irank = 1, mpi_world%size
750 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
751 end do
752
753 ! now redistribute the arrays
754 ! use block data decomposition of grid indices
755 safe_allocate(final_offsets(0:mpi_world%size))
756 safe_allocate(final_sizes(0:mpi_world%size-1))
757
758 do irank = 0, mpi_world%size
759 final_offsets(irank) = sum(int(initial_sizes, int64)) * irank/mpi_world%size
760 end do
761 do irank = 0, mpi_world%size - 1
762 assert(final_offsets(irank + 1) - final_offsets(irank) < huge(0_int32))
763 final_sizes(irank) = int(final_offsets(irank + 1) - final_offsets(irank), int32)
764 end do
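! For example, initial sizes of (/7, 1, 4/) on three ranks are redistributed
! to (/4, 4, 4/).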
765
766 safe_allocate(scounts(0:mpi_world%size-1))
767 safe_allocate(sdispls(0:mpi_world%size-1))
768 safe_allocate(rcounts(0:mpi_world%size-1))
769 safe_allocate(rdispls(0:mpi_world%size-1))
770 ! determine communication pattern
771 scounts = 0
772 do irank = 0, mpi_world%size - 1
773 ! get overlap of initial and final distribution
774 itmp = min(final_offsets(irank+1), initial_offsets(mpi_world%rank+1)) - &
775 max(final_offsets(irank), initial_offsets(mpi_world%rank))
776 assert(itmp < huge(0_int32))
777 if (itmp < 0) then
778 scounts(irank) = 0
779 else
780 scounts(irank) = int(itmp, int32)
781 end if
782 end do
783 sdispls(0) = 0
784 do irank = 1, mpi_world%size - 1
785 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
786 end do
787 assert(sum(int(scounts, int64)) < huge(0_int32))
788 assert(sum(scounts) == initial_sizes(mpi_world%rank))
789
790 rcounts = 0
791 do irank = 0, mpi_world%size - 1
792 ! get overlap of initial and final distribution
793 itmp = min(final_offsets(mpi_world%rank+1), initial_offsets(irank+1)) - &
794 max(final_offsets(mpi_world%rank), initial_offsets(irank))
795 assert(itmp < huge(0_int32))
796 if (itmp < 0) then
797 rcounts(irank) = 0
798 else
799 rcounts(irank) = int(itmp, int32)
800 end if
801 end do
802 rdispls(0) = 0
803 do irank = 1, mpi_world%size - 1
804 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
805 end do
806 ! check for consistency between sending and receiving
807 assert(sum(rcounts) == final_sizes(mpi_world%rank))
808
809 safe_allocate(data_output(1:final_sizes(mpi_world%rank)))
810 call mpi_world%alltoallv(data_input, scounts, sdispls, mpi_integer8, &
811 data_output, rcounts, rdispls, mpi_integer8)
812
813 ! save final sizes of array if optional argument present
814 if (present(output_sizes)) then
815 safe_allocate(output_sizes(0:mpi_world%size-1))
816 output_sizes(:) = final_sizes(:)
817 end if
818
819 safe_deallocate_a(final_offsets)
820 safe_deallocate_a(final_sizes)
821
822 safe_deallocate_a(scounts)
823 safe_deallocate_a(sdispls)
824 safe_deallocate_a(rcounts)
825 safe_deallocate_a(rdispls)
826
827
828 pop_sub(rebalance_array)
829 end subroutine rebalance_array
830
831 !> reorder the points in the grid according to the variables
832 !! MeshOrder and MeshLocalOrder
835 subroutine reorder_points(namespace, space, idx, grid_to_spatial, grid_to_spatial_reordered)
836 type(namespace_t), intent(in) :: namespace
837 class(space_t), intent(in) :: space
838 type(index_t), intent(in) :: idx
839 integer(int64), intent(in) :: grid_to_spatial(:)
840 integer(int64), allocatable, intent(out) :: grid_to_spatial_reordered(:)
841
842 integer :: bsize(space%dim), order, default
843 integer :: nn, idir, ipg, ip, number_of_blocks(space%dim)
844 type(block_t) :: blk
845 integer, parameter :: &
846 ORDER_BLOCKS = 1, &
847 order_original = 2, &
848 order_cube = 3
849 integer :: point(1:space%dim)
850 integer(int64), allocatable :: reorder_indices(:), reorder_recv(:)
851 integer, allocatable :: index_map(:), indices(:)
852 integer(int64), allocatable :: grid_to_spatial_recv(:)
853 integer, allocatable :: initial_sizes(:)
854 integer(int64), allocatable :: initial_offsets(:)
855 integer(int64) :: istart, iend, indstart, indend, spatial_size
856 integer :: irank, local_size, num_recv
857 integer :: iunique, nunique
858 integer :: direction
859 logical :: increase_with_dimension
860
861 integer, allocatable :: scounts(:), sdispls(:), rcounts(:), rdispls(:)
862 integer(int64), allocatable :: spatial_cutoff(:)
863
864 push_sub(reorder_points)
865
866 !%Variable MeshOrder
867 !%Type integer
868 !%Section Execution::Optimization
869 !%Description
870 !% This variable controls how the grid points are mapped to a
871 !% linear array for global arrays. For runs that are parallel
872 !% in domains, the local mesh order may be different (see
873 !% <tt>MeshLocalOrder</tt>).
874 !% The default is blocks when serial in domains and cube when
875 !% parallel in domains with the local mesh order set to blocks.
876 !%Option order_blocks 1
877 !% The grid is mapped using small parallelepipedic grids. The size
878 !% of the blocks is controlled by <tt>MeshBlockSize</tt>.
879 !%Option order_original 2
880 !% The original order of the indices is used to map the grid.
881 !%Option order_cube 3
882 !% The grid is mapped using a full cube, i.e. without blocking.
883 !%End
884 default = order_blocks
885 call parse_variable(namespace, 'MeshOrder', default, order)
886 ! no reordering in 1D necessary
887 if (space%dim == 1) then
888 order = order_original
889 end if
890
891 !%Variable MeshBlockDirection
892 !%Type integer
893 !%Section Execution::Optimization
894 !%Description
895 !% Determines the direction in which the dimensions are chosen to compute
896 !% the blocked index for sorting the mesh points (see MeshBlockSize).
897 !% The default is increase_with_dimension, corresponding to xyz ordering
898 !% in 3D.
899 !%Option increase_with_dimension 1
900 !% The fastest changing index is in the first dimension, i.e., in 3D this
901 !% corresponds to ordering in xyz directions.
902 !%Option decrease_with_dimension 2
903 !% The fastest changing index is in the last dimension, i.e., in 3D this
904 !% corresponds to ordering in zyx directions.
905 !%End
906 call parse_variable(namespace, 'MeshBlockDirection', 1, direction)
907 increase_with_dimension = direction == 1
908 if (direction /= 1 .and. direction /= 2) then
909 call messages_input_error(namespace, 'MeshBlockDirection')
910 end if
911
912 select case (order)
913 case (order_original)
914 ! only copy points, they stay in their original ordering
915 safe_allocate(grid_to_spatial_reordered(1:size(grid_to_spatial)))
916 grid_to_spatial_reordered(1:size(grid_to_spatial)) = grid_to_spatial(1:size(grid_to_spatial))
917 case (order_blocks, order_cube)
918 if (order == order_cube) then
919 bsize = idx%nr(2, :) - idx%nr(1, :) + 1
920 else
921 !%Variable MeshBlockSize
922 !%Type block
923 !%Section Execution::Optimization
924 !%Description
925 !% To improve memory-access locality when calculating derivatives,
926 !% <tt>Octopus</tt> arranges mesh points in blocks. This variable
927 !% controls the size of these blocks in the different
928 !% directions. The default is selected according to the value of
929 !% the <tt>StatesBlockSize</tt> variable. (This variable only affects the
930 !% performance of <tt>Octopus</tt> and not the results.)
931 !%End
932 if (conf%target_states_block_size < 16) then
933 bsize(1) = 80 * 4 / abs(conf%target_states_block_size)
934 if (space%dim > 1) bsize(2) = 4
935 if (space%dim > 2) bsize(3:) = 10
936 else
937 bsize(1) = max(4 * 16 / abs(conf%target_states_block_size), 1)
938 if (space%dim > 1) bsize(2) = 15
939 if (space%dim > 2) bsize(3:) = 15
940 end if
941
942 if (parse_block(namespace, 'MeshBlockSize', blk) == 0) then
943 nn = parse_block_cols(blk, 0)
944 if (nn /= space%dim) then
945 message(1) = "Error: number of entries in MeshBlockSize must match the number of dimensions."
946 call messages_fatal(1, namespace=namespace)
947 end if
948 do idir = 1, nn
949 call parse_block_integer(blk, 0, idir - 1, bsize(idir))
950 end do
951 end if
952 end if
953
954 number_of_blocks = (idx%nr(2, :) - idx%nr(1, :) + 1) / bsize + 1
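! The points are grouped into bricks of bsize points per direction;
! get_blocked_index enumerates the points brick by brick, so that points of
! the same brick end up with consecutive indices after sorting.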
955
956
957 ! do the global reordering in parallel, use block data decomposition of global indices
958 ! reorder indices along blocked parallelepiped curve
959
960 ! collect current sizes of distributed array
961 safe_allocate(initial_sizes(0:mpi_world%size-1))
962 call mpi_world%allgather(size(grid_to_spatial), 1, mpi_integer, initial_sizes(0), 1, mpi_integer)
963 safe_allocate(initial_offsets(0:mpi_world%size))
964 initial_offsets(0) = 0
965 do irank = 1, mpi_world%size
966 initial_offsets(irank) = initial_offsets(irank-1) + initial_sizes(irank-1)
967 end do
968
969 ! get local range and size
970 istart = initial_offsets(mpi_world%rank)
971 iend = initial_offsets(mpi_world%rank + 1) - 1
972 assert(iend - istart + 1 < huge(0_int32))
973 local_size = int(iend - istart + 1, int32)
974 assert(local_size == initial_sizes(mpi_world%rank))
975
976 ! compute new indices locally
977 safe_allocate(reorder_indices(1:local_size))
978 safe_allocate(indices(1:local_size))
979 safe_allocate(grid_to_spatial_reordered(1:local_size))
980 !$omp parallel do private(point)
981 do ip = 1, local_size
982 call index_spatial_to_point(idx, space%dim, grid_to_spatial(ip), point)
983 point = point + idx%offset
984 reorder_indices(ip) = get_blocked_index(space%dim, point, bsize, number_of_blocks, increase_with_dimension)
985 end do
986 ! parallel sort according to the new indices
987 ! sort the local array
988 call sort(reorder_indices, indices)
989 ! save reordered indices to send to other processes
990 !$omp parallel do
991 do ip = 1, local_size
992 grid_to_spatial_reordered(ip) = grid_to_spatial(indices(ip))
993 end do
994
995 ! get minimum and maximum
996 if(local_size > 0) then
997 indstart = reorder_indices(1)
998 indend = reorder_indices(local_size)
999 else
1000 indstart = huge(1_int64)
1001 indend = 0
1002 end if
1003 call mpi_world%allreduce_inplace(indstart, 1, mpi_integer8, mpi_min)
1004 call mpi_world%allreduce_inplace(indend, 1, mpi_integer8, mpi_max)
1005 spatial_size = indend - indstart + 1
1006
1007 ! get index ranges for each rank
1008 safe_allocate(spatial_cutoff(0:mpi_world%size-1))
1009 do irank = 0, mpi_world%size - 1
1010 spatial_cutoff(irank) = spatial_size * (irank+1)/mpi_world%size + indstart
1011 end do
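! spatial_cutoff(irank) is the (exclusive) upper bound of the slice of the
! sorted blocked indices assigned to rank irank; each local index is sent to
! the rank whose slice contains it.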
1012
1013 safe_allocate(scounts(0:mpi_world%size-1))
1014 safe_allocate(sdispls(0:mpi_world%size-1))
1015 safe_allocate(rcounts(0:mpi_world%size-1))
1016 safe_allocate(rdispls(0:mpi_world%size-1))
1017 ! get send counts
1018 scounts = 0
1019 irank = 0
1020 ! the indices are ordered, so we can go through them and increase
1021 ! the rank to which they are associated when we cross a cutoff
1022 do ip = 1, local_size
1023 if (reorder_indices(ip) >= spatial_cutoff(irank)) then
1024 ! this do loop is needed in case some ranks do not have any points
1025 do while (reorder_indices(ip) >= spatial_cutoff(irank))
1026 irank = irank + 1
1027 end do
1028 assert(irank < mpi_world%size)
1029 end if
1030 scounts(irank) = scounts(irank) + 1
1031 end do
1032 safe_deallocate_a(spatial_cutoff)
1033 assert(sum(scounts) == local_size)
1034
1035 ! compute communication pattern (sdispls, rcounts, rdispls)
1036 sdispls(0) = 0
1037 do irank = 1, mpi_world%size - 1
1038 sdispls(irank) = sdispls(irank - 1) + scounts(irank - 1)
1039 end do
1040
1041 call mpi_world%alltoall(scounts, 1, mpi_integer, &
1042 rcounts, 1, mpi_integer)
1043
1044 rdispls(0) = 0
1045 do irank = 1, mpi_world%size - 1
1046 rdispls(irank) = rdispls(irank - 1) + rcounts(irank - 1)
1047 end do
1048
1049 ! make sure the arrays get allocated also if we do not receive anything
1050 num_recv = max(sum(rcounts), 1)
1051 ! communicate the locally sorted indices
1052 safe_allocate(reorder_recv(1:num_recv))
1053 call mpi_world%alltoallv(reorder_indices, scounts, sdispls, mpi_integer8, &
1054 reorder_recv, rcounts, rdispls, mpi_integer8)
1055 safe_deallocate_a(reorder_indices)
1056
1057 ! communicate the corresponding spatial indices
1058 safe_allocate(grid_to_spatial_recv(1:num_recv))
1059 call mpi_world%alltoallv(grid_to_spatial_reordered, scounts, sdispls, mpi_integer8, &
1060 grid_to_spatial_recv, rcounts, rdispls, mpi_integer8)
1061 safe_deallocate_a(grid_to_spatial_reordered)
1062
1063 ! do k-way merge of sorted indices
1064 safe_allocate(reorder_indices(1:num_recv))
1065 safe_allocate(index_map(1:num_recv))
1066 if (sum(rcounts) > 0) then
1067 call merge_sorted_arrays(reorder_recv, rcounts, reorder_indices, index_map)
1068
1069 ! get number of unique indices, needed for boundary
1070 nunique = 1
1071 do ipg = 2, sum(rcounts)
1072 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1073 nunique = nunique + 1
1074 end if
1075 end do
1076
1077 ! reorder according to new order, but remove duplicate entries
1078 safe_allocate(grid_to_spatial_reordered(1:nunique))
1079 iunique = 1
1080 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(1))
1081 do ipg = 2, sum(rcounts)
1082 if (reorder_indices(ipg) /= reorder_indices(ipg-1)) then
1083 iunique = iunique + 1
1084 grid_to_spatial_reordered(iunique) = grid_to_spatial_recv(index_map(ipg))
1085 end if
1086 end do
1087 else
1088 safe_allocate(grid_to_spatial_reordered(1:0))
1089 end if
1090
1091 safe_deallocate_a(initial_offsets)
1092 safe_deallocate_a(initial_sizes)
1093
1094 safe_deallocate_a(reorder_indices)
1095 safe_deallocate_a(reorder_recv)
1096
1097 safe_deallocate_a(grid_to_spatial_recv)
1098 safe_deallocate_a(index_map)
1099 safe_deallocate_a(indices)
1100
1101 safe_deallocate_a(scounts)
1102 safe_deallocate_a(sdispls)
1103 safe_deallocate_a(rcounts)
1104 safe_deallocate_a(rdispls)
1105
1106 end select
1107 pop_sub(reorder_points)
1108 end subroutine reorder_points
1109
1110 !> return the sizes and offsets of a distributed array for all tasks of an MPI group.
1112 subroutine get_sizes_offsets(local_size, sizes, offsets, mpi_grp)
1113 integer, intent(in) :: local_size
1114 integer, allocatable, intent(out) :: sizes(:)
1115 integer(int64), allocatable, intent(out) :: offsets(:)
1116 type(mpi_grp_t), optional, intent(in) :: mpi_grp
1117
1118 integer :: irank
1119 type(mpi_grp_t) :: mpi_grp_
1120
1121 push_sub(get_sizes_offsets)
1122
1123 if (present(mpi_grp)) then
1124 mpi_grp_ = mpi_grp
1125 else
1126 mpi_grp_ = mpi_world
1127 end if
1128
1129 safe_allocate(sizes(0:mpi_grp_%size-1))
1130 call mpi_grp_%allgather(local_size, 1, mpi_integer, sizes(0), 1, mpi_integer)
1131 safe_allocate(offsets(0:mpi_grp_%size))
1132 offsets(0) = 0
1133 do irank = 1, mpi_grp_%size
1134 offsets(irank) = offsets(irank-1) + sizes(irank-1)
1135 end do
1136
1137 pop_sub(get_sizes_offsets)
1138 end subroutine get_sizes_offsets
1139
1140end module mesh_init_oct_m
1141
1142!! Local Variables:
1143!! mode: f90
1144!! coding: utf-8
1145!! End: