Mesh Setup
Setting up a grid
In particular, we have:
- the “first stage”: This sets up the derivatives and the double-grid, and calls stages 1 and 2 of mesh_init. At this stage we are still unaware of parallelism and domain decomposition.
subroutine grid_init_stage_1(gr, namespace, space, symm, latt, n_sites, site_position)
type(grid_t), intent(inout) :: gr
type(namespace_t), intent(in) :: namespace
class(space_t), intent(in) :: space
type(symmetries_t), optional, intent(in) :: symm
type(lattice_vectors_t), optional, intent(in) :: latt
integer, optional, intent(in) :: n_sites
real(real64), optional, intent(in) :: site_position(:,:)
type(stencil_t) :: cube
integer :: enlarge(space%dim)
type(block_t) :: blk
integer :: idir
real(real64) :: grid_spacing(space%dim)
PUSH_SUB(grid_init_stage_1)
gr%box => box_factory_create(namespace, space, latt=latt, n_sites=n_sites, site_position=site_position)
if (present(symm)) then
gr%symm = symm
end if
!%Variable Spacing
!%Type float
!%Section Mesh
!%Description
!% The spacing between the points in the mesh. This controls the
!% quality of the discretization: smaller spacing gives more
!% precise results but increased computational cost.
!%
!% When using curvilinear coordinates, this is a canonical spacing
!% that will be changed locally by the transformation. In periodic
!% directions, your spacing may be slightly different than what
!% you request here, since the box size must be an integer
!% multiple of the spacing.
!%
!% The default value is defined by the image resolution if <tt>BoxShape =
!% box_image</tt>. Otherwise, there is no default.
!%
!% It is possible to have a different spacing in each one of the Cartesian directions
!% if we define <tt>Spacing</tt> as a block of the form
!%
!% <tt>%Spacing
!% <br> spacing_x | spacing_y | spacing_z
!% <br>%</tt>
!%End
if(parse_block(namespace, 'Spacing', blk) == 0) then
if(parse_block_cols(blk,0) < space%dim) call messages_input_error(namespace, 'Spacing')
do idir = 1, space%dim
call parse_block_float(blk, 0, idir - 1, grid_spacing(idir), units_inp%length)
end do
call parse_block_end(blk)
else
call parse_variable(namespace, 'Spacing', -M_ONE, grid_spacing(1), units_inp%length)
grid_spacing = grid_spacing(1)
end if
if (associated(gr%box)) then
select type (box => gr%box)
type is (box_image_t)
do idir = 1, space%dim
! default grid_spacing is determined from the pixel size such that one grid point = one pixel.
if (grid_spacing(idir) < M_ZERO) then
grid_spacing(idir) = box%pixel_size(idir)
end if
end do
end select
end if
if (any(grid_spacing(1:space%dim) < M_EPSILON)) then
message(1) = " Your input for 'Spacing' is negative or zero."
call messages_fatal(1, namespace=namespace)
end if
!%Variable PeriodicBoundaryMask
!%Type block
!%Section Mesh
!%Description
!% (Experimental) Defines a mask for which periodic boundaries are replaced by zero boundary conditions.
!%End
if (parse_block(namespace, 'PeriodicBoundaryMask', blk) < 0) then
gr%masked_periodic_boundaries = .false.
else
gr%masked_periodic_boundaries = .true.
call parse_block_string(blk, 0, 0, gr%periodic_boundary_mask)
call messages_experimental('PeriodicBoundaryMask')
call parse_block_end(blk)
end if
! Initialize coordinate system
call initialize_coordinate_system(gr, namespace, space, latt, n_sites, site_position)
! initialize derivatives
call derivatives_init(gr%der, namespace, space, gr%coord_system)
! the stencil used to generate the grid is a union of a cube (for
! multigrid) and the Laplacian
call stencil_cube_get_lapl(cube, space%dim, order = 2)
call stencil_union(cube, gr%der%lapl%stencil, gr%stencil)
call stencil_end(cube)
enlarge = 2
enlarge = max(enlarge, gr%der%n_ghost)
call mesh_init_stage_1(gr, namespace, space, gr%box, gr%coord_system, grid_spacing, enlarge)
call mesh_init_stage_2(gr, namespace, space, gr%box, gr%stencil)
POP_SUB(grid_init_stage_1)
end subroutine grid_init_stage_1
- the second stage: This takes parallelism and domain decomposition into account: it calls stage 3 of mesh_init and then builds the derivative operators on the resulting mesh.
subroutine grid_init_stage_2(gr, namespace, space, mc, qvector)
type(grid_t), target, intent(inout) :: gr
type(namespace_t), intent(in) :: namespace
class(space_t), intent(in) :: space
type(multicomm_t), intent(in) :: mc
real(real64), optional, intent(in) :: qvector(:)
PUSH_SUB(grid_init_stage_2)
call mesh_init_stage_3(gr, namespace, space, gr%stencil, mc)
call nl_operator_global_init(namespace)
call derivatives_build(gr%der, namespace, space, gr, qvector)
! print info concerning the grid
call grid_write_info(gr, namespace=namespace)
if(space%dim == 3) then
call symmetrizer_init(gr%symmetrizer, gr, gr%symm)
end if
POP_SUB(grid_init_stage_2)
end subroutine grid_init_stage_2
Mesh initialization:
- First stage: set up the mesh_t::idx index structure, defining the lower and upper bounds and, in periodic directions, adjusting the spacing to be commensurate with the box (a small standalone sketch of this adjustment follows the listing below).
subroutine mesh_init_stage_1(mesh, namespace, space, box, coord_system, spacing, enlarge)
class(mesh_t), intent(inout) :: mesh
type(namespace_t), intent(in) :: namespace
class(space_t), intent(in) :: space
class(box_t), target, intent(in) :: box
class(coordinate_system_t), target, intent(in) :: coord_system
real(real64), intent(in) :: spacing(1:space%dim)
integer, intent(in) :: enlarge(1:space%dim)
integer :: idir, jj, delta
real(real64) :: x(space%dim), chi(space%dim), spacing_new(-1:1), box_bounds(2, space%dim)
logical :: out
PUSH_SUB_WITH_PROFILE(mesh_init_stage_1)
mesh%box => box
SAFE_ALLOCATE(mesh%spacing(1:space%dim))
mesh%spacing = spacing ! this number can change in the following
mesh%use_curvilinear = coord_system%local_basis
mesh%coord_system => coord_system
call index_init(mesh%idx, space%dim)
mesh%idx%enlarge = enlarge
! get box bounds along the axes that generate the grid points
select type (coord_system)
class is (affine_coordinates_t)
box_bounds = box%bounds(coord_system%basis)
class default
box_bounds = box%bounds()
end select
! adjust nr
do idir = 1, space%dim
chi = M_ZERO
! the upper border
jj = 0
out = .false.
do while(.not.out)
jj = jj + 1
chi(idir) = real(jj, real64) * mesh%spacing(idir)
if (mesh%use_curvilinear) then
x = coord_system%to_cartesian(chi)
out = x(idir) > maxval(abs(box_bounds(:, idir))) + BOX_BOUNDARY_DELTA
else
! do the same comparison here as in simul_box_contains_points
out = chi(idir) > maxval(abs(box_bounds(:, idir))) + BOX_BOUNDARY_DELTA
end if
end do
mesh%idx%nr(2, idir) = jj - 1
end do
! we have a symmetric mesh (for now)
mesh%idx%nr(1,:) = -mesh%idx%nr(2,:)
! we have to adjust a couple of things for the periodic directions
do idir = 1, space%periodic_dim
if (mesh%idx%nr(2, idir) == 0) then
! this happens if Spacing > box size
mesh%idx%nr(2, idir) = 1
mesh%idx%nr(1, idir) = -1
end if
! We have to adjust the spacing to be commensurate with the box,
! for this we scan the possible values of the grid size around the
! one we selected. We choose the size that has the spacing closest
! to the requested one.
do delta = -1, 1
spacing_new(delta) = M_TWO*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
spacing_new(delta) = abs(spacing_new(delta) - spacing(idir))
end do
delta = minloc(spacing_new, dim = 1) - 2
ASSERT(delta >= -1)
ASSERT(delta <= 1)
mesh%spacing(idir) = M_TWO*maxval(abs(box_bounds(:, idir))) / real(2 * mesh%idx%nr(2, idir) + 1 - delta, real64)
! we need to adjust the grid by adding or removing one point
if (delta == -1) then
mesh%idx%nr(1, idir) = mesh%idx%nr(1, idir) - 1
else if (delta == 1) then
mesh%idx%nr(2, idir) = mesh%idx%nr(2, idir) - 1
end if
end do
if ( any(abs(mesh%spacing - spacing) > 1.e-6_real64) ) then
call messages_write('The spacing has been modified to make it commensurate with the periodicity of the system.')
call messages_warning()
end if
do idir = space%periodic_dim + 1, space%dim
if (mesh%idx%nr(2, idir) == 0) then
write(message(1),'(a,i2)') 'Spacing > box size in direction ', idir
call messages_fatal(1, namespace=namespace)
end if
end do
mesh%idx%ll = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
! compute strides for cubic indices
mesh%idx%stride(:) = 1
do idir = 2, space%dim+1
mesh%idx%stride(idir) = mesh%idx%stride(idir-1) * &
(mesh%idx%ll(idir-1) + 2*mesh%idx%enlarge(idir-1))
end do
POP_SUB_WITH_PROFILE(mesh_init_stage_1)
end subroutine mesh_init_stage_1
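The only non-trivial part of this stage is making the spacing commensurate with the box in the periodic directions. The following standalone program (a minimal sketch, not part of Octopus) reproduces that logic for a single direction: it counts how many points fit into the half box, then scans the neighbouring grid sizes and keeps the one whose spacing is closest to the requested value. The program name and the numerical values are arbitrary examples, and the BOX_BOUNDARY_DELTA tolerance and curvilinear coordinates are ignored.
program adjust_spacing
  use, intrinsic :: iso_fortran_env, only: real64
  implicit none
  real(real64), parameter :: half_length = 5.0_real64   ! half of the periodic box length (example value)
  real(real64), parameter :: spacing_in  = 0.3_real64   ! requested spacing (example value)
  real(real64) :: spacing_try(-1:1), spacing_out
  integer :: jj, nr1, nr2, delta
  ! count how many multiples of the spacing fit into the half box (the upper border)
  jj = 0
  do while (real(jj + 1, real64)*spacing_in <= half_length)
    jj = jj + 1
  end do
  nr2 =  jj
  nr1 = -jj                              ! the mesh is symmetric around the origin
  ! scan the grid sizes 2*nr2 + 2, 2*nr2 + 1 and 2*nr2 and keep the one whose
  ! spacing (box length divided by number of points) is closest to the request
  do delta = -1, 1
    spacing_try(delta) = 2.0_real64*half_length/real(2*nr2 + 1 - delta, real64)
  end do
  delta = minloc(abs(spacing_try - spacing_in), dim=1) - 2
  spacing_out = spacing_try(delta)
  if (delta == -1) nr1 = nr1 - 1         ! one extra point on the lower side
  if (delta == +1) nr2 = nr2 - 1         ! one point less on the upper side
  print '(a,2i5)',   'index range nr(1), nr(2):', nr1, nr2
  print '(a,f10.6)', 'commensurate spacing    :', spacing_out
end program adjust_spacing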
- Second stage: find the points of the (enlarged) index range that lie inside the simulation box, add the boundary points required by the stencil, and build the global mappings between grid indices and spatial indices.
subroutine mesh_init_stage_2(mesh, namespace, space, box, stencil, regenerate)
class(mesh_t), intent(inout) :: mesh
type(namespace_t), intent(in) :: namespace
class(space_t), intent(in) :: space
class(box_t), intent(in) :: box
type(stencil_t), intent(in) :: stencil
logical, optional, intent(in) :: regenerate
integer :: is
real(real64) :: chi(1:space%dim)
real(real64) :: pos(space%dim)
integer :: point(space%dim), point_stencil(space%dim), grid_sizes(space%dim)
integer(int64) :: global_size
integer(int32) :: local_size
integer(int64) :: ispatial, ispatialb, istart, iend, spatial_size, ipg
integer :: ip, ib, ib2, np, np_boundary, ii
logical :: found
type(lihash_t) :: spatial_to_boundary
integer(int64), allocatable :: boundary_to_spatial(:), boundary_to_spatial_reordered(:)
integer(int64), allocatable :: grid_to_spatial(:), grid_to_spatial_initial(:), grid_to_spatial_reordered(:)
integer(int64), allocatable :: spatial_to_grid(:)
integer, allocatable :: sizes(:)
integer(int64), allocatable :: offsets(:)
integer :: size_boundary
#ifdef HAVE_MPI
integer(int64), pointer :: ptr(:)
type(mpi_grp_t) :: internode_grp, intranode_grp
#endif
PUSH_SUB_WITH_PROFILE(mesh_init_stage_2)
if (.not. optional_default(regenerate, .false.)) then
! enlarge mesh for boundary points
mesh%idx%nr(1, :) = mesh%idx%nr(1, :) - mesh%idx%enlarge(:)
mesh%idx%nr(2, :) = mesh%idx%nr(2, :) + mesh%idx%enlarge(:)
end if
!%Variable MeshIndexType
!%Type integer
!%Default idx_cubic
!%Section Mesh
!%Description
!% Determine index type. Must be the same for restarting a calculation.
!%Option idx_cubic 1
!% Cubic indices are used to map the spatial information to the grid points.
!%Option idx_hilbert 2
!% A Hilbert space-filling curve is used to map the spatial information to
!% the grid points.
!%End
call parse_variable(namespace, 'MeshIndexType', IDX_CUBIC, mesh%idx%type)
grid_sizes = mesh%idx%nr(2, :) - mesh%idx%nr(1, :) + 1
mesh%idx%offset = grid_sizes/2
if (space%dim > 1 .and. any(grid_sizes > 2**(int(63/space%dim, int64)))) then
write(message(1), '(A, I10, A, I2, A)') "Error: grid too large, more than ", 2**(int(63/space%dim, int64)), &
" points in one direction for ", space%dim, " dimensions. This is not supported."
call messages_fatal(1, namespace=namespace)
end if
global_size = product(int(grid_sizes, int64))
! compute the bits per dimension: grid_sizes(i) <= 2**bits
mesh%idx%bits = maxval(ceiling(log(real(grid_sizes, real64) )/log(2.)))
if (mesh%idx%type == IDX_CUBIC) then
spatial_size = global_size
else if (mesh%idx%type == IDX_HILBERT) then
spatial_size = 2**(space%dim*mesh%idx%bits)
end if
! use block data decomposition of spatial indices
istart = spatial_size * mpi_world%rank/mpi_world%size
iend = spatial_size * (mpi_world%rank+1)/mpi_world%size - 1
if (.not. (iend - istart + 1 < huge(0_int32))) then
write(message(1), '(A, I10, A, I2, A)') "Error: local grid too large, more than ", &
huge(0_int32), " points. This is not supported. Maybe use more MPI ranks?"
call messages_fatal(1, namespace=namespace)
end if
local_size = int(iend - istart + 1, int32)
SAFE_ALLOCATE(grid_to_spatial_initial(1:local_size))
! get inner grid indices
ip = 1
do ispatial = istart, iend
call index_spatial_to_point(mesh%idx, space%dim, ispatial, point)
! first check if point is outside bounding box
if (any(point < mesh%idx%nr(1, :) + mesh%idx%enlarge)) cycle
if (any(point > mesh%idx%nr(2, :) - mesh%idx%enlarge)) cycle
! then check if point is inside simulation box
chi = real(point, real64) * mesh%spacing
pos = mesh%coord_system%to_cartesian(chi)
if (.not. box%contains_point(pos)) cycle
grid_to_spatial_initial(ip) = ispatial
ASSERT(ip + 1 < huge(ip))
ip = ip + 1
end do
np = ip - 1
call rebalance_array(grid_to_spatial_initial(1:np), grid_to_spatial, sizes)
np = sizes(mpi_world%rank)
SAFE_DEALLOCATE_A(grid_to_spatial_initial)
SAFE_ALLOCATE(spatial_to_grid(grid_to_spatial(1):grid_to_spatial(np)))
SAFE_DEALLOCATE_A(sizes)
!$omp parallel do
do ispatial = grid_to_spatial(1), grid_to_spatial(np)
spatial_to_grid(ispatial) = -1
end do
!$omp parallel do
do ip = 1, np
spatial_to_grid(grid_to_spatial(ip)) = ip
end do
! get local boundary indices
call lihash_init(spatial_to_boundary)
size_boundary = np
SAFE_ALLOCATE(boundary_to_spatial(1:size_boundary))
ib = 1
do ip = 1, np
call index_spatial_to_point(mesh%idx, space%dim, grid_to_spatial(ip), point)
do is = 1, stencil%size
if (stencil%center == is) cycle
point_stencil(1:space%dim) = point(1:space%dim) + stencil%points(1:space%dim, is)
! check if point is in inner part
call index_point_to_spatial(mesh%idx, space%dim, ispatialb, point_stencil)
ASSERT(ispatialb >= 0)
if (ispatialb >= lbound(spatial_to_grid, dim=1, kind=int64) .and. &
ispatialb <= ubound(spatial_to_grid, dim=1, kind=int64)) then
if (spatial_to_grid(ispatialb) > 0) cycle
end if
! then check if point is inside simulation box
chi = real(point_stencil, real64) * mesh%spacing
pos = mesh%coord_system%to_cartesian(chi)
if (box%contains_point(pos)) cycle
! it has to be a boundary point now
! check if already counted
ib2 = lihash_lookup(spatial_to_boundary, ispatialb, found)
if (found) cycle
boundary_to_spatial(ib) = ispatialb
call lihash_insert(spatial_to_boundary, ispatialb, ib)
ib = ib + 1
! enlarge array
if (ib >= size_boundary) then
size_boundary = size_boundary * 2
call make_array_larger(boundary_to_spatial, size_boundary)
end if
end do
end do
np_boundary = ib - 1
call lihash_end(spatial_to_boundary)
SAFE_DEALLOCATE_A(spatial_to_grid)
! reorder inner points
call reorder_points(namespace, space, mesh%idx, grid_to_spatial, grid_to_spatial_reordered)
SAFE_DEALLOCATE_A(grid_to_spatial)
call rebalance_array(grid_to_spatial_reordered, grid_to_spatial, sizes)
np = sizes(mpi_world%rank)
mesh%np_global = sizes(0)
do ii = 1, mpi_world%size - 1
mesh%np_global = mesh%np_global + sizes(ii)
end do
SAFE_DEALLOCATE_A(sizes)
SAFE_DEALLOCATE_A(grid_to_spatial_reordered)
! reorder boundary points
call make_array_larger(boundary_to_spatial, np_boundary)
call reorder_points(namespace, space, mesh%idx, boundary_to_spatial, boundary_to_spatial_reordered)
SAFE_DEALLOCATE_A(boundary_to_spatial)
call rebalance_array(boundary_to_spatial_reordered, boundary_to_spatial, sizes)
SAFE_DEALLOCATE_A(boundary_to_spatial_reordered)
! global grid size
np_boundary = sizes(mpi_world%rank)
mesh%np_part_global = mesh%np_global + sizes(0)
do ii = 1, mpi_world%size - 1
mesh%np_part_global = mesh%np_part_global + sizes(ii)
end do
SAFE_DEALLOCATE_A(sizes)
! get global indices
#ifdef HAVE_MPI
! create shared memory window and fill it only on root
call create_intranode_communicator(mpi_world, intranode_grp, internode_grp)
call lmpi_create_shared_memory_window(mesh%np_part_global, intranode_grp, &
mesh%idx%window_grid_to_spatial, mesh%idx%grid_to_spatial_global)
#else
SAFE_ALLOCATE(mesh%idx%grid_to_spatial_global(1:mesh%np_part_global))
#endif
! inner grid
call get_sizes_offsets(np, sizes, offsets)
call mpi_world%gatherv(grid_to_spatial, np, MPI_INTEGER8, &
mesh%idx%grid_to_spatial_global, sizes, offsets, MPI_INTEGER8, 0)
! boundary indices
call get_sizes_offsets(np_boundary, sizes, offsets)
call mpi_world%gatherv(boundary_to_spatial, np_boundary, MPI_INTEGER8, &
mesh%idx%grid_to_spatial_global(mesh%np_global+1:), sizes, offsets, MPI_INTEGER8, 0)
! fill global hash map
#ifdef HAVE_MPI
! create shared memory window and fill it only on root
call lmpi_create_shared_memory_window(spatial_size, intranode_grp, &
mesh%idx%window_spatial_to_grid, ptr)
mesh%idx%spatial_to_grid_global(0:spatial_size-1) => ptr(1:spatial_size)
#else
SAFE_ALLOCATE(mesh%idx%spatial_to_grid_global(0:spatial_size-1))
#endif
if (mpi_grp_is_root(mpi_world)) then
! fill only on root, then broadcast
!$omp parallel do
do ispatial = 0, spatial_size-1
mesh%idx%spatial_to_grid_global(ispatial) = 0
end do
!$omp parallel do
do ipg = 1, mesh%np_part_global
mesh%idx%spatial_to_grid_global(mesh%idx%grid_to_spatial_global(ipg)) = ipg
end do
end if
#ifdef HAVE_MPI
! now broadcast the global arrays to local rank 0 on each node
if (intranode_grp%rank == 0) then
call internode_grp%bcast(mesh%idx%grid_to_spatial_global(1), mesh%np_part_global, MPI_INTEGER8, 0)
call internode_grp%bcast(mesh%idx%spatial_to_grid_global(0), spatial_size, MPI_INTEGER8, 0)
end if
call lmpi_sync_shared_memory_window(mesh%idx%window_grid_to_spatial, intranode_grp)
call lmpi_sync_shared_memory_window(mesh%idx%window_spatial_to_grid, intranode_grp)
#endif
SAFE_DEALLOCATE_A(offsets)
SAFE_DEALLOCATE_A(sizes)
SAFE_DEALLOCATE_A(boundary_to_spatial)
SAFE_DEALLOCATE_A(grid_to_spatial)
POP_SUB_WITH_PROFILE(mesh_init_stage_2)
end subroutine mesh_init_stage_2
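The bookkeeping above rests on two simple ideas: every candidate point is mapped to a single "spatial" index (cubic by default, using the strides computed in stage 1, or a Hilbert index), and the range of spatial indices is split evenly over the MPI ranks with a block data decomposition. The following standalone program is a minimal sketch of both, assuming a cubic index and no MPI; the index bounds, the example point and the number of ranks are arbitrary, and details of the real index_point_to_spatial (offsets, enlargement handling) are omitted.
program cubic_index_sketch
  use, intrinsic :: iso_fortran_env, only: int64
  implicit none
  integer, parameter :: ndim = 3
  integer :: nr(2, ndim)               ! lower and upper index bounds per direction
  integer(int64) :: stride(ndim), spatial_size, istart, iend
  integer :: point(ndim), irank, nranks, idir
  ! example index ranges (assumed to already include the boundary enlargement)
  nr(1, :) = [-12, -12, -12]
  nr(2, :) = [ 12,  12,  12]
  ! strides of the cubic index: direction 1 varies fastest, direction 2
  ! advances by the number of points along direction 1, and so on
  stride(1) = 1
  do idir = 2, ndim
    stride(idir) = stride(idir - 1)*int(nr(2, idir - 1) - nr(1, idir - 1) + 1, int64)
  end do
  spatial_size = stride(ndim)*int(nr(2, ndim) - nr(1, ndim) + 1, int64)
  ! cubic spatial index of an example point, as a single 0-based integer
  point = [0, 3, -5]
  print *, 'spatial index of the point:', sum(int(point - nr(1, :), int64)*stride)
  ! block data decomposition: split the spatial index range evenly over the ranks
  nranks = 4
  do irank = 0, nranks - 1
    istart = spatial_size*int(irank, int64)/nranks
    iend   = spatial_size*int(irank + 1, int64)/nranks - 1
    print *, 'rank', irank, 'handles spatial indices', istart, 'to', iend
  end do
end program cubic_index_sketch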
- Third stage: We can finally generate the list of mesh points mesh_t::x. At this stage, domain decomposition is taken into account and x(:,:) contains only the domain-local points.
! ---------------------------------------------------------
!> When running parallel in domains, stencil and np_stencil
!! are needed to compute the ghost points.
!! mpi_grp is the communicator group that will be used for
!! this mesh.
! ---------------------------------------------------------
subroutine mesh_init_stage_3(mesh, namespace, space, stencil, mc, parent, regenerate)
class(mesh_t), intent(inout) :: mesh
type(namespace_t), intent(in) :: namespace
class(space_t), intent(in) :: space
type(stencil_t), intent(in) :: stencil
type(multicomm_t), intent(in) :: mc
type(mesh_t), optional, intent(in) :: parent
logical, optional, intent(in) :: regenerate
integer :: ip
PUSH_SUB_WITH_PROFILE(mesh_init_stage_3)
call mpi_grp_init(mesh%mpi_grp, mc%group_comm(P_STRATEGY_DOMAINS))
! check if we are running in parallel in domains
mesh%parallel_in_domains = (mesh%mpi_grp%size > 1)
call checksum_calculate(1, mesh%np_part_global, mesh%idx%grid_to_spatial_global(1), &
mesh%idx%checksum)
if (mesh%parallel_in_domains) then
call do_partition()
else
! When running serially those two are the same.
ASSERT(mesh%np_part_global < huge(mesh%np_part))
mesh%np = i8_to_i4(mesh%np_global)
mesh%np_part = i8_to_i4(mesh%np_part_global)
! These must be initialized for par_vec_gather, par_vec_scatter to work
! as copy operations when running without domain parallelization.
mesh%pv%np_global = mesh%np_global
mesh%pv%np_ghost = 0
mesh%pv%np_bndry = mesh%np_part - mesh%np
mesh%pv%npart = 1
mesh%pv%xlocal = 1
end if
! Compute mesh%x
SAFE_ALLOCATE(mesh%x(1:mesh%np_part, 1:space%dim))
!$omp parallel do
do ip = 1, mesh%np_part
mesh%x(ip, 1:space%dim) = M_ZERO
end do
do ip = 1, mesh%np_part
mesh%x(ip, 1:space%dim) = mesh_x_global(mesh, mesh_local2global(mesh, ip))
end do
call mesh_get_vol_pp()
POP_SUB_WITH_PROFILE(mesh_init_stage_3)
contains
! ---------------------------------------------------------
subroutine do_partition()
#ifdef HAVE_MPI
integer :: jj, ipart, jpart
integer(int64) :: ipg
integer, allocatable :: gindex(:), gedges(:)
logical, allocatable :: nb(:, :)
integer :: idx(space%dim), jx(space%dim)
type(MPI_Comm) :: graph_comm
integer :: iedge, nnb
logical :: use_topo, reorder, partition_print
integer :: ierr
logical :: has_virtual_partition = .false.
integer :: vsize !< 'virtual' partition size
type(restart_t) :: restart_load, restart_dump
integer, allocatable :: part_vec(:)
PUSH_SUB(mesh_init_stage_3.do_partition)
!Try to load the partition from the restart files
if (.not. optional_default(regenerate, .false.)) then
call restart_init(restart_load, namespace, RESTART_PARTITION, RESTART_TYPE_LOAD, mc, ierr, mesh=mesh, exact=.true.)
if (ierr == 0) call mesh_partition_load(restart_load, mesh, ierr)
call restart_end(restart_load)
else
ierr = 0
end if
if (ierr /= 0) then
!%Variable MeshPartitionVirtualSize
!%Type integer
!%Default mesh mpi_grp size
!%Section Execution::Parallelization
!%Description
!% Gives the possibility to change the partition nodes.
!% Afterward, it crashes.
!%End
call parse_variable(namespace, 'MeshPartitionVirtualSize', mesh%mpi_grp%size, vsize)
if (vsize /= mesh%mpi_grp%size) then
write(message(1),'(a,I7)') "Changing the partition size to", vsize
write(message(2),'(a)') "The execution will crash."
call messages_warning(2, namespace=namespace)
has_virtual_partition = .true.
else
has_virtual_partition = .false.
end if
if (.not. present(parent)) then
call mesh_partition(mesh, namespace, space, stencil, vsize)
else
! if there is a parent grid, use its partition
call mesh_partition_from_parent(mesh, parent)
end if
!Now that we have the partitions, we save them
call restart_init(restart_dump, namespace, RESTART_PARTITION, RESTART_TYPE_DUMP, mc, ierr, mesh=mesh)
call mesh_partition_dump(restart_dump, mesh, vsize, ierr)
call restart_end(restart_dump)
end if
if (has_virtual_partition) then
call profiling_end(namespace)
call print_date("Calculation ended on ")
write(message(1),'(a)') "Execution has ended."
write(message(2),'(a)') "If you want to run your system, do not use MeshPartitionVirtualSize."
call messages_warning(2, namespace=namespace)
call messages_end()
call global_end()
stop
end if
!%Variable MeshUseTopology
!%Type logical
!%Default false
!%Section Execution::Parallelization
!%Description
!% (experimental) If enabled, <tt>Octopus</tt> will use an MPI virtual
!% topology to map the processors. This can improve performance
!% for certain interconnection systems.
!%End
call parse_variable(namespace, 'MeshUseTopology', .false., use_topo)
if (use_topo) then
! At the moment we still need the global partition. This will be removed in near future.
SAFE_ALLOCATE(part_vec(1:mesh%np_part_global))
call partition_get_global(mesh%partition, part_vec(1:mesh%np_global))
! generate a table of neighbours
SAFE_ALLOCATE(nb(1:mesh%mpi_grp%size, 1:mesh%mpi_grp%size))
nb = .false.
do ipg = 1, mesh%np_global
ipart = part_vec(ipg)
call mesh_global_index_to_coords(mesh, ipg, idx)
do jj = 1, stencil%size
jx = idx + stencil%points(:, jj)
if (all(jx >= mesh%idx%nr(1, :)) .and. all(jx <= mesh%idx%nr(2, :))) then
jpart = part_vec(mesh_global_index_from_coords(mesh, jx))
if (ipart /= jpart ) nb(ipart, jpart) = .true.
end if
end do
end do
SAFE_DEALLOCATE_A(part_vec)
! now generate the information of the graph
SAFE_ALLOCATE(gindex(1:mesh%mpi_grp%size))
SAFE_ALLOCATE(gedges(1:count(nb)))
! and now generate it
iedge = 0
do ipart = 1, mesh%mpi_grp%size
do jpart = 1, mesh%mpi_grp%size
if (nb(ipart, jpart)) then
iedge = iedge + 1
gedges(iedge) = jpart - 1
end if
end do
gindex(ipart) = iedge
end do
ASSERT(iedge == count(nb))
reorder = .true.
call MPI_Graph_create(mesh%mpi_grp%comm, mesh%mpi_grp%size, gindex, gedges, reorder, graph_comm, mpi_err)
! we have a new communicator
call mpi_grp_init(mesh%mpi_grp, graph_comm)
SAFE_DEALLOCATE_A(nb)
SAFE_DEALLOCATE_A(gindex)
SAFE_DEALLOCATE_A(gedges)
end if
if (optional_default(regenerate, .false.)) call par_vec_end(mesh%pv)
call par_vec_init(mesh%mpi_grp, mesh%np_global, mesh%idx, stencil,&
space, mesh%partition, mesh%pv, namespace)
! check the number of ghost neighbours in parallel
nnb = 0
jpart = mesh%pv%partno
do ipart = 1, mesh%pv%npart
if (ipart == jpart) cycle
if (mesh%pv%ghost_scounts(ipart) /= 0) nnb = nnb + 1
end do
ASSERT(nnb >= 0 .and. nnb < mesh%pv%npart)
! Set local point numbers.
mesh%np = mesh%pv%np_local
mesh%np_part = mesh%np + mesh%pv%np_ghost + mesh%pv%np_bndry
!%Variable PartitionPrint
!%Type logical
!%Default true
!%Section Execution::Parallelization
!%Description
!% (experimental) If disabled, <tt>Octopus</tt> will not compute
!% nor print the partition information, such as local points,
!% no. of neighbours, ghost points and boundary points.
!%End
call parse_variable(namespace, 'PartitionPrint', .true., partition_print)
if (partition_print) then
call mesh_partition_write_info(mesh, namespace=namespace)
call mesh_partition_messages_debug(mesh, namespace)
end if
#endif
POP_SUB(mesh_init_stage_3.do_partition)
end subroutine do_partition
The ordering of the points along a single index ‘serializes’ the mesh by a choice of space-filling curve methods (cf. MeshIndexType above); a small illustration of the idea follows.
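To illustrate what serializing a grid along a space-filling curve means, here is a standalone sketch using a Morton (Z-order) curve in two dimensions. This is not the curve used by Octopus (MeshIndexType = idx_hilbert selects a Hilbert curve, which has better locality), and the program name and grid size are arbitrary; the bit-interleaving below simply shows how a curve-based index maps a point to a single key, so that sorting by the key tends to keep spatially close points close in the one-dimensional ordering.
program morton_sketch
  use, intrinsic :: iso_fortran_env, only: int64
  implicit none
  integer, parameter :: bits = 4          ! bits per dimension: a 16 x 16 grid
  integer :: ix, iy
  integer(int64) :: key
  ! print the Morton key of every point of a small 2D grid; sorting the points
  ! by this key serializes the grid along a Z-shaped space-filling curve
  do iy = 0, 2**bits - 1
    do ix = 0, 2**bits - 1
      key = interleave(ix, iy)
      print '(2i4,i8)', ix, iy, key
    end do
  end do
contains
  ! interleave the bits of two non-negative integers: bit b of ix becomes
  ! bit 2*b of the key and bit b of iy becomes bit 2*b + 1
  function interleave(ix, iy) result(key)
    integer, intent(in) :: ix, iy
    integer(int64) :: key
    integer :: b
    key = 0_int64
    do b = 0, bits - 1
      if (btest(ix, b)) key = ibset(key, 2*b)
      if (btest(iy, b)) key = ibset(key, 2*b + 1)
    end do
  end function interleave
end program morton_sketch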