!! Copyright (C) 2005-2010 Florian Lorenzen, Heiko Appel, X. Andrade
!! Copyright (C) 2021 Sebastian Ohlmann
!!
!! This program is free software; you can redistribute it and/or modify
!! it under the terms of the GNU General Public License as published by
!! the Free Software Foundation; either version 2, or (at your option)
!! any later version.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU General Public License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with this program; if not, write to the Free Software
!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
!! 02110-1301, USA.
!!

#include "global.h"

!> Module implementing boundary conditions in Octopus.
module boundaries_oct_m
  use accel_oct_m
  use batch_oct_m
  use batch_ops_oct_m
  use debug_oct_m
  use global_oct_m
  use index_oct_m
  use, intrinsic :: iso_fortran_env
  use math_oct_m
  use mesh_oct_m
  use messages_oct_m
  use mpi_oct_m
  use namespace_oct_m
  use par_vec_oct_m
  use parser_oct_m
  use partition_oct_m
  use profiling_oct_m
  use space_oct_m
  use types_oct_m
  use unit_oct_m
  use unit_system_oct_m

  implicit none

  private

  !> This class contains information about the boundary conditions.
  type boundaries_t
    private
    logical :: periodic = .false.
    logical :: fully_periodic = .false.
    integer :: nper = 0                       !< number of periodic boundary points mapped locally
    integer, allocatable :: per_points(:, :)  !< (1:2, 1:nper) pairs of boundary point and corresponding inner point
    integer, allocatable :: per_send(:, :)    !< local indices of the points sent to each partition
    integer, allocatable :: per_recv(:, :)    !< local indices of the points received from each partition
    logical :: per_contiguous                 !< whether the periodic boundary points are contiguous in index space
    integer, allocatable :: nsend(:)          !< number of points sent to each partition
    integer, allocatable :: nrecv(:)          !< number of points received from each partition
    type(accel_mem_t) :: buff_per_points      !< device buffers mirroring the host arrays above
    type(accel_mem_t) :: buff_per_send
    type(accel_mem_t) :: buff_per_recv
    type(accel_mem_t) :: buff_nsend
    type(accel_mem_t) :: buff_nrecv
    logical, public :: spiralBC = .false.     !< whether spin-spiral boundary conditions are requested
    logical, public :: spiral = .false.
    real(real64), allocatable, public :: spiral_q(:)  !< momentum of the spin spiral
  end type boundaries_t

  public ::          &
    boundaries_t,    &
    boundaries_init, &
    boundaries_end,  &
    boundaries_set

  public ::                     &
    par_vec_handle_batch_t,     &
    dpar_vec_ghost_update,      &
    zpar_vec_ghost_update,      &
    dghost_update_batch_start,  &
    zghost_update_batch_start,  &
    dghost_update_batch_finish, &
    zghost_update_batch_finish

  integer, parameter, public :: &
    POINT_BOUNDARY = 1,         &
    POINT_INNER    = 2

  !> auxiliary handle for ghost updates
  type par_vec_handle_batch_t
    private
    type(batch_t) :: ghost_send
    type(MPI_Request), allocatable :: requests(:)
    integer :: nnb
    ! these are needed for CL
    real(real64),    pointer :: drecv_buffer(:)
    complex(real64), pointer :: zrecv_buffer(:)
    real(real64),    pointer :: dsend_buffer(:)
    complex(real64), pointer :: zsend_buffer(:)
    type(batch_t),   pointer :: v_local
    type(par_vec_t), pointer :: pv
  end type par_vec_handle_batch_t

  interface boundaries_set
    module procedure boundaries_set_batch
    module procedure dboundaries_set_single
    module procedure zboundaries_set_single
  end interface boundaries_set

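  ! Usage sketch for the generic interface above (hypothetical caller code;
  ! bc is an initialized boundaries_t, psib and ff are assumed names):
  !
  !   call boundaries_set(bc, mesh, psib)  ! batch_t: dispatches to boundaries_set_batch
  !   call boundaries_set(bc, mesh, ff)    ! real or complex mesh function: *_set_single
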
contains

  ! ---------------------------------------------------------
  !> Initialize the boundary conditions.
  subroutine boundaries_init(this, namespace, space, mesh, qvector)
    type(boundaries_t),     intent(inout) :: this
    type(namespace_t),      intent(in)    :: namespace
    class(space_t),         intent(in)    :: space
    type(mesh_t), target,   intent(in)    :: mesh
    real(real64), optional, intent(in)    :: qvector(:)

    integer :: sp, ip, ip_inner, iper, ip_bnd
    integer(int64) :: ip_inner_global
    integer :: ipart
    integer(int64), allocatable :: recv_rem_points(:, :), points(:), per_send(:, :)
    integer, allocatable :: part(:), points_local(:)
    integer :: nper_recv, iper_recv
    integer, allocatable :: rdispls(:), sdispls(:)

    PUSH_SUB(boundaries_init)

    this%periodic = space%is_periodic()
    this%fully_periodic = space%periodic_dim == space%dim

    if (space%is_periodic()) then

      !%Variable SpiralBoundaryCondition
      !%Type logical
      !%Default no
      !%Section Mesh
      !%Description
      !% (Experimental) If set to yes, Octopus will apply spin-spiral boundary conditions.
      !% The momentum of the spin spiral is defined by the variable
      !% <tt>TDMomentumTransfer</tt>.
      !%End
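      ! Illustrative input-file sketch (hypothetical values; the momentum is
      ! read from TDMomentumTransfer, which is parsed elsewhere):
      !
      !   SpiralBoundaryCondition = yes
      !   %TDMomentumTransfer
      !     0.1 | 0.0 | 0.0
      !   %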
      call parse_variable(namespace, 'SpiralBoundaryCondition', .false., this%spiralBC)
      if (this%spiralBC) then
        call messages_experimental("SpiralBoundaryCondition")
        if (.not. present(qvector)) then
          message(1) = "TDMomentumTransfer or TDReducedMomentumTransfer must be defined if SpiralBoundaryCondition=yes"
          call messages_fatal(1, namespace=namespace)
        end if
        SAFE_ALLOCATE(this%spiral_q(1:space%dim))
        this%spiral_q(1:space%dim) = qvector(1:space%dim)
      end if

      ! the boundary points start after the local (and ghost) points
      sp = mesh%np
      if (mesh%parallel_in_domains) sp = mesh%np + mesh%pv%np_ghost

      ! count the number of points that are periodic
      this%nper = 0
      nper_recv = 0
      do ip = sp + 1, mesh%np_part
        ip_inner_global = mesh_periodic_point(mesh, space, ip)
        ip_inner = mesh_global2local(mesh, ip_inner_global)

        ! it is the same point, can happen for mixed periodicity
        if (ip == ip_inner) cycle
        ! the point maps to the boundary, can happen for mixed periodicity;
        ! in this case the point is already set to zero, so we can ignore it
        ! (for different mixed boundary conditions, we would need to be careful here)
        if (ip_inner_global > mesh%np_global) cycle
        ! now check whether the point is local or needs to be communicated
        if (ip_inner /= 0 .and. ip_inner <= mesh%np) then
          this%nper = this%nper + 1
        else
          nper_recv = nper_recv + 1
        end if
      end do
      if (.not. mesh%parallel_in_domains) then
        ASSERT(nper_recv == 0)
      end if
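
      ! Note the two-pass pattern: the loop above only counts the points
      ! (this%nper, nper_recv) so that the mapping arrays below can be
      ! allocated with the correct sizes; the second loop then fills them.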

      SAFE_ALLOCATE(this%per_points(1:2, 1:max(this%nper, 1)))
      !$omp parallel do
      do ip = 1, this%nper
        this%per_points(1:2, ip) = -1
      end do

      if (mesh%parallel_in_domains) then
        SAFE_ALLOCATE(this%per_recv(1:max(nper_recv, 1), 1:max(mesh%pv%npart, 1)))
        SAFE_ALLOCATE(this%nrecv(1:mesh%pv%npart))
        SAFE_ALLOCATE(recv_rem_points(1:nper_recv, 1:mesh%pv%npart))
        SAFE_ALLOCATE(points(1:nper_recv))
        SAFE_ALLOCATE(points_local(1:nper_recv))
        SAFE_ALLOCATE(part(1:nper_recv))
        this%nrecv = 0
      end if

      iper = 0
      iper_recv = 0
      do ip = sp + 1, mesh%np_part
        ip_inner_global = mesh_periodic_point(mesh, space, ip)
        ip_inner = mesh_global2local(mesh, ip_inner_global)

        ! it is the same point, can happen for mixed periodicity
        if (ip == ip_inner) cycle
        ! the point maps to the boundary, can happen for mixed periodicity;
        ! in this case the point is already set to zero, so we can ignore it
        if (ip_inner_global > mesh%np_global) cycle
        ! now check whether the point is local or needs to be communicated
        if (ip_inner /= 0 .and. ip_inner <= mesh%np) then
          iper = iper + 1
          this%per_points(POINT_BOUNDARY, iper) = ip
          this%per_points(POINT_INNER, iper) = ip_inner
        else
          ! this can only happen if parallel in domains:
          ! the point is on another node
          iper_recv = iper_recv + 1
          points(iper_recv) = ip_inner_global
          points_local(iper_recv) = ip
        end if
      end do
      if (mesh%parallel_in_domains) then
        ! find the points in the other partitions
        call partition_get_partition_number(mesh%partition, nper_recv, points, part)
        do iper_recv = 1, nper_recv
          ipart = part(iper_recv)
          ASSERT(this%nrecv(ipart) + 1 < huge(0_int32))
          ! count the points to receive from each node
          this%nrecv(ipart) = this%nrecv(ipart) + 1
          ! and store the number of the point
          this%per_recv(this%nrecv(ipart), ipart) = points_local(iper_recv)
          ! and its global index
          recv_rem_points(this%nrecv(ipart), ipart) = points(iper_recv)
        end do
      end if

      if (mesh%parallel_in_domains) then
        ! communicate the number of points to receive/send
        SAFE_ALLOCATE(this%nsend(1:mesh%pv%npart))
        call mesh%mpi_grp%alltoall(this%nrecv, 1, MPI_INTEGER, this%nsend, 1, MPI_INTEGER)

        SAFE_ALLOCATE(sdispls(1:mesh%pv%npart))
        SAFE_ALLOCATE(rdispls(1:mesh%pv%npart))
        SAFE_ALLOCATE(this%per_send(1:max(maxval(this%nsend), 1), 1:mesh%pv%npart))
        SAFE_ALLOCATE(per_send(1:maxval(this%nsend), 1:mesh%pv%npart))
        ! compute displacements
        ASSERT(int(nper_recv, int64)*mesh%pv%npart < huge(0_int32))
        do ipart = 1, mesh%pv%npart
          rdispls(ipart) = nper_recv * (ipart - 1)
          sdispls(ipart) = maxval(this%nsend) * (ipart - 1)
        end do
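        ! Worked example (illustrative numbers): with npart = 3 and
        ! nper_recv = 5, rdispls = (/0, 5, 10/). Since recv_rem_points is
        ! column-major with a fixed leading dimension of nper_recv, the block
        ! received from partition ipart starts at offset nper_recv*(ipart-1),
        ! i.e. at column ipart; sdispls works the same way for per_send.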

        ! exchange the indices
        call mesh%mpi_grp%alltoallv(recv_rem_points, this%nrecv, rdispls, MPI_INTEGER8, &
          per_send, this%nsend, sdispls, MPI_INTEGER8)

        do ipart = 1, mesh%pv%npart
          ! get the local index of the points to send
          do ip = 1, this%nsend(ipart)
            this%per_send(ip, ipart) = mesh_global2local(mesh, per_send(ip, ipart))
            ! make sure we have local points here
            ASSERT(this%per_send(ip, ipart) > 0)
            ASSERT(this%per_send(ip, ipart) <= mesh%np)
          end do
        end do

        SAFE_DEALLOCATE_A(per_send)
        SAFE_DEALLOCATE_A(sdispls)
        SAFE_DEALLOCATE_A(rdispls)
        SAFE_DEALLOCATE_A(recv_rem_points)
        SAFE_DEALLOCATE_A(points)
        SAFE_DEALLOCATE_A(points_local)
        SAFE_DEALLOCATE_A(part)
      end if

      ! We now determine whether the boundary points are contiguous.
      ! If they are, the first mapping boundaries%per_points(POINT_BOUNDARY, ip) is not necessary.
      this%per_contiguous = .true.
      do ip = 1, this%nper - 1
        ip_bnd = this%per_points(POINT_BOUNDARY, ip)
        if (ip_bnd + 1 /= this%per_points(POINT_BOUNDARY, ip + 1)) then
          this%per_contiguous = .false.
          exit
        end if
      end do
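      ! Illustrative example (hypothetical indices): the boundary points
      ! 1001, 1002, 1003 are contiguous, so a kernel can address point ip as
      ! first_point + ip - 1; the sequence 1001, 1003, 1004 is not contiguous
      ! and requires the explicit per_points lookup.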

      if (accel_is_enabled()) then
        call accel_create_buffer(this%buff_per_points, accel_mem_read_only, type_integer, 2*this%nper)
        call accel_write_buffer(this%buff_per_points, 2*this%nper, this%per_points)

        if (mesh%parallel_in_domains) then
          call accel_create_buffer(this%buff_per_send, accel_mem_read_only, type_integer, product(ubound(this%per_send)))
          call accel_write_buffer(this%buff_per_send, product(ubound(this%per_send)), this%per_send)

          call accel_create_buffer(this%buff_per_recv, accel_mem_read_only, type_integer, product(ubound(this%per_recv)))
          call accel_write_buffer(this%buff_per_recv, product(ubound(this%per_recv)), this%per_recv)

          call accel_create_buffer(this%buff_nsend, accel_mem_read_only, type_integer, mesh%pv%npart)
          call accel_write_buffer(this%buff_nsend, mesh%pv%npart, this%nsend)

          call accel_create_buffer(this%buff_nrecv, accel_mem_read_only, type_integer, mesh%pv%npart)
          call accel_write_buffer(this%buff_nrecv, mesh%pv%npart, this%nrecv)
        end if
      end if

    end if

    POP_SUB(boundaries_init)
  end subroutine boundaries_init
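
  ! Typical lifecycle sketch (hypothetical caller code; namespace, space,
  ! mesh and the batch psib are assumed to be set up elsewhere):
  !
  !   type(boundaries_t) :: bc
  !   call boundaries_init(bc, namespace, space, mesh)
  !   call boundaries_set(bc, mesh, psib)   ! set the boundary values of a batch
  !   call boundaries_end(bc)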

  ! ---------------------------------------------------------

  subroutine boundaries_end(this)
    type(boundaries_t), intent(inout) :: this

    PUSH_SUB(boundaries_end)

    if (this%periodic) then
      SAFE_DEALLOCATE_A(this%spiral_q)

      SAFE_DEALLOCATE_A(this%per_send)
      SAFE_DEALLOCATE_A(this%per_recv)
      SAFE_DEALLOCATE_A(this%nsend)
      SAFE_DEALLOCATE_A(this%nrecv)

      if (accel_is_enabled()) then
        call accel_release_buffer(this%buff_per_send)
        call accel_release_buffer(this%buff_per_recv)
        call accel_release_buffer(this%buff_nsend)
        call accel_release_buffer(this%buff_nrecv)
        call accel_release_buffer(this%buff_per_points)
      end if

      SAFE_DEALLOCATE_A(this%per_points)
    end if

    POP_SUB(boundaries_end)
  end subroutine boundaries_end

  ! -------------------------------------------------------
  !> Set all boundary points in ffb.
  subroutine boundaries_set_batch(this, mesh, ffb, phase_correction, buff_phase_corr, offset, async)
    type(boundaries_t),          intent(in)    :: this
    class(mesh_t),               intent(in)    :: mesh
    class(batch_t),              intent(inout) :: ffb
    complex(real64), optional,   intent(in)    :: phase_correction(:)
    type(accel_mem_t), optional, intent(in)    :: buff_phase_corr
    integer, optional,           intent(in)    :: offset
    logical, optional,           intent(in)    :: async

    PUSH_SUB(boundaries_set_batch)

    if (ffb%type() == type_float) then
      call dboundaries_set_batch(this, mesh, ffb, phase_correction, buff_phase_corr, offset, async)
    else if (ffb%type() == type_cmplx) then
      call zboundaries_set_batch(this, mesh, ffb, phase_correction, buff_phase_corr, offset, async)
    else
      ASSERT(.false.)
    end if

    POP_SUB(boundaries_set_batch)
  end subroutine boundaries_set_batch

#include "undef.F90"
#include "complex.F90"
#include "boundaries_inc.F90"

#include "undef.F90"
#include "real.F90"
#include "boundaries_inc.F90"

end module boundaries_oct_m

!! Local Variables:
!! mode: f90
!! coding: utf-8
!! End: