distributed.F90
!! Copyright (C) 2008-2016 X. Andrade
!!
!! This program is free software; you can redistribute it and/or modify
!! it under the terms of the GNU General Public License as published by
!! the Free Software Foundation; either version 2, or (at your option)
!! any later version.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU General Public License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with this program; if not, write to the Free Software
!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
!! 02110-1301, USA.
!!

#include "global.h"

module distributed_oct_m
  use debug_oct_m
  use global_oct_m
  use messages_oct_m
  use mpi_oct_m
  use multicomm_oct_m
  use profiling_oct_m

  implicit none

  private

  public ::                &
    distributed_t,         &
    distributed_nullify,   &
    distributed_init,      &
    distributed_copy,      &
    distributed_end,       &
    distributed_allgather

  ! TODO(Alex) Create an issue for refactoring the attributes of type distributed_t.
  ! `distributed_nullify` is a meaningless routine; look at removing or consolidating it.
  ! `distributed_allgather` is pointless and should be scrapped;
  ! it is only called in xc_vxc_inc.F90.

  !> @brief Distribution of N instances over mpi_grp%size processes, for the local rank mpi_grp%rank
  type distributed_t
    integer              :: start    = 1        !< Global index of the first element held by this rank
    integer              :: end      = 0        !< Global index of the last element held by this rank
    integer              :: nlocal   = 0        !< Number of local elements, or (range(2, mpi_grp%rank) - range(1, mpi_grp%rank) + 1)
    integer              :: nglobal  = 0        !< Total number of distributed elements
    logical              :: parallel = .false.  !< True if the distribution spans more than one process
    integer, allocatable :: node(:)             !< Rank that holds each global element; dim(1:nglobal)
    integer, allocatable :: range(:, :)         !< First and last global index held by each rank; dim(2, 0:n_processes-1).
    !!                                              Defined the same for all processes of mpi_grp
    integer, allocatable :: num(:)              !< Number of elements held by each rank; dim(0:n_processes-1)
    type(mpi_grp_t)      :: mpi_grp             !< MPI group over which the elements are distributed
  end type distributed_t
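
  ! Illustrative sketch (not part of the original source): for nglobal = 10 elements
  ! distributed over a 3-process mpi_grp, a possible layout is
  !
  !   rank 0: range(:, 0) = [1, 4],  num(0) = 4   (start = 1, end = 4,  nlocal = 4 on rank 0)
  !   rank 1: range(:, 1) = [5, 8],  num(1) = 4   (start = 5, end = 8,  nlocal = 4 on rank 1)
  !   rank 2: range(:, 2) = [9, 10], num(2) = 2   (start = 9, end = 10, nlocal = 2 on rank 2)
  !
  ! with node(1:10) = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]. The exact block sizes are decided
  ! by multicomm_divide_range, so the split shown here is an assumption.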

contains

  subroutine distributed_nullify(this, total)
    type(distributed_t), intent(out) :: this
    integer, optional,   intent(in)  :: total

    push_sub(distributed_nullify)

    if (present(total)) then
      this%end     = total
      this%nlocal  = total
      this%nglobal = total
    end if

    pop_sub(distributed_nullify)
  end subroutine distributed_nullify


  !> Distribute N instances across M processes of communicator comm
  subroutine distributed_init(this, total, comm, tag, scalapack_compat)
    type(distributed_t),        intent(out) :: this
    integer,                    intent(in)  :: total
    type(MPI_Comm),             intent(in)  :: comm
    character(len=*), optional, intent(in)  :: tag
    logical,          optional, intent(in)  :: scalapack_compat

    integer :: i

    push_sub(distributed_init)

    this%nglobal = total
    safe_allocate(this%node(1:this%nglobal))

    call mpi_grp_init(this%mpi_grp, comm)
    safe_allocate(this%num(0:this%mpi_grp%size - 1))
    safe_allocate(this%range(1:2, 0:this%mpi_grp%size - 1))

    ! Defaults
    if (this%mpi_grp%size == 1 .or. this%nglobal == 1) then
      this%node(1:total) = 0
      this%start = 1
      this%end = total
      this%nlocal = total
      this%parallel = .false.
      this%range(:, 0) = [1, total]
      this%num(0) = total
      call mpi_grp_init(this%mpi_grp, mpi_comm_undefined)

    else
      this%parallel = .true.

      call multicomm_divide_range(this%nglobal, this%mpi_grp%size, this%range(1, :), this%range(2, :), &
        lsize = this%num, scalapack_compat = scalapack_compat)

      this%start = this%range(1, this%mpi_grp%rank)
      this%end = this%range(2, this%mpi_grp%rank)
      this%nlocal = this%num(this%mpi_grp%rank)

      do i = 0, this%mpi_grp%size - 1
        this%node(this%range(1, i):this%range(2, i)) = i
      end do

      if (present(tag)) then
        message(1) = 'Info: Parallelization in ' // trim(tag)
        call messages_info(1)
        do i = 0, this%mpi_grp%size - 1
          write(message(1),'(a,i4,a,i6,a)') 'Info: Node in group ', i, &
            ' will manage ', this%num(i), ' '//trim(tag)
          if (this%num(i) > 0) then
            write(message(1),'(a,a,i6,a,i6)') trim(message(1)), ':', this%range(1, i), " - ", this%range(2, i)
          end if
          call messages_info(1)
        end do
      end if

    end if

    pop_sub(distributed_init)
  end subroutine distributed_init
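
  ! Minimal usage sketch (illustrative, not part of the original source; the
  ! variables nst and comm stand in for a caller's data and are assumptions):
  !
  !   type(distributed_t) :: dist
  !   integer :: ist
  !
  !   call distributed_init(dist, nst, comm, tag = "states")
  !   do ist = dist%start, dist%end
  !     ! operate only on the nlocal elements owned by this rank
  !   end do
  !   call distributed_end(dist)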


  !> @brief Create a copy of a distributed instance
  subroutine distributed_copy(in, out)
    type(distributed_t), intent(in)    :: in
    type(distributed_t), intent(inout) :: out

    integer :: size

    push_sub(distributed_copy)

    out%start = in%start
    out%end = in%end
    out%nlocal = in%nlocal
    out%nglobal = in%nglobal
    out%parallel = in%parallel

    ! TODO(Alex) This is a weird choice; size could be retrieved from size(num), for example
    size = in%mpi_grp%size

    call mpi_grp_init(out%mpi_grp, in%mpi_grp%comm)

    if (allocated(in%node)) then
      safe_allocate(out%node(1:in%nglobal))
      out%node(1:in%nglobal) = in%node(1:in%nglobal)
    end if

    if (allocated(in%range)) then
      safe_allocate(out%range(1:2, 0:size - 1))
      out%range(1:2, 0:size - 1) = in%range(1:2, 0:size - 1)
    end if

    if (allocated(in%num)) then
      safe_allocate(out%num(0:size - 1))
      out%num(0:size - 1) = in%num(0:size - 1)
    end if

    pop_sub(distributed_copy)
  end subroutine distributed_copy


  subroutine distributed_end(this)
    type(distributed_t), intent(inout) :: this

    push_sub(distributed_end)

    safe_deallocate_a(this%node)
    safe_deallocate_a(this%range)
    safe_deallocate_a(this%num)

    pop_sub(distributed_end)
  end subroutine distributed_end


  subroutine distributed_allgather(this, aa)
    type(distributed_t),      intent(in)    :: this
    real(real64), contiguous, intent(inout) :: aa(:)

    integer, allocatable :: displs(:)

    if (.not. this%parallel) return

    push_sub(distributed_allgather)

    safe_allocate(displs(0:this%mpi_grp%size - 1))

    displs(0:this%mpi_grp%size - 1) = this%range(1, 0:this%mpi_grp%size - 1) - 1

#ifdef HAVE_MPI
    call mpi_allgatherv(mpi_in_place, this%nlocal, mpi_double_precision, &
      aa(1), this%num, displs, mpi_double_precision, this%mpi_grp%comm, mpi_err)
#endif

    safe_deallocate_a(displs)

    pop_sub(distributed_allgather)
  end subroutine distributed_allgather
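
  ! Illustrative call pattern (an assumption about a caller, not code from this
  ! file): each rank fills only the slice it owns in a globally sized array, and
  ! the gather then completes the array on every rank of the group.
  !
  !   real(real64), allocatable :: vv(:)
  !   safe_allocate(vv(1:dist%nglobal))
  !   vv(dist%start:dist%end) = local_contribution(1:dist%nlocal)  ! local_contribution is hypothetical
  !   call distributed_allgather(dist, vv)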

end module distributed_oct_m


!! Local Variables:
!! mode: f90
!! coding: utf-8
!! End: