    ! Components of distributed_t: the distribution of nglobal instances over the
    ! mpi_grp%size processes of an MPI group, as seen by the local rank mpi_grp%rank.
    integer              :: nglobal  = 0        ! total number of distributed instances
    logical              :: parallel = .false.  ! .true. if the instances are spread over more than one process
    integer, allocatable :: node(:)             ! node(j): rank that owns global index j
    integer, allocatable :: range(:, :)         ! range(1:2, rank): first and last global index owned by rank
    integer, allocatable :: num(:)              ! num(rank): number of instances owned by rank
    type(mpi_grp_t)      :: mpi_grp             ! MPI group over which the instances are distributed
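    ! Illustrative example (not from the sources): for nglobal = 10 instances on a
    ! group of 3 processes, an even block split would give
    !   num   = [4, 3, 3]
    !   range = | 1  5  8 |        (columns: ranks 0, 1, 2; rows: first, last)
    !           | 4  7 10 |
    !   node  = [0,0,0,0, 1,1,1, 2,2,2]
    ! so that rank 1 sees start = 5, end = 7, nlocal = 3. The actual split is
    ! delegated to multicomm_divide_range below and may differ, e.g. when a
    ! ScaLAPACK-compatible layout is requested.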
  subroutine distributed_nullify(this, total)
    type(distributed_t), intent(out) :: this
    integer, optional,   intent(in)  :: total

    if (present(total)) then
      ! ...
    end if

  end subroutine distributed_nullify
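  ! Hedged usage sketch (hypothetical caller, not part of this module): reset a
  ! distribution to its serial defaults (parallel = .false.), optionally recording
  ! that all n instances are handled locally:
  !   call distributed_nullify(dist, total = n)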
  !> Distribute N instances across M processes of communicator comm
  subroutine distributed_init(this, total, comm, tag, scalapack_compat)
    type(distributed_t),        intent(out) :: this
    integer,                    intent(in)  :: total
    type(MPI_Comm),             intent(in)  :: comm
    character(len=*), optional, intent(in)  :: tag
    logical,          optional, intent(in)  :: scalapack_compat

    ! ...
    SAFE_ALLOCATE(this%node(1:this%nglobal))
    SAFE_ALLOCATE(this%num(0:this%mpi_grp%size - 1))
    SAFE_ALLOCATE(this%range(1:2, 0:this%mpi_grp%size - 1))
    if (this%mpi_grp%size == 1 .or. this%nglobal == 1) then
      ! Serial case: rank 0 holds all instances
      this%node(1:total) = 0
      ! ...
      this%parallel = .false.
      this%range(:, 0) = [1, total]
      ! ...
    else
      this%parallel = .true.

      ! Split the range [1, total] between the processes of the group
      call multicomm_divide_range(total, this%mpi_grp%size, this%range(1, :), this%range(2, :), &
        lsize = this%num, scalapack_compat = scalapack_compat)

      ! Block owned by the local rank
      this%start  = this%range(1, this%mpi_grp%rank)
      this%end    = this%range(2, this%mpi_grp%rank)
      this%nlocal = this%num(this%mpi_grp%rank)

      ! node(j) is the rank that owns global index j
      do i = 0, this%mpi_grp%size - 1
        this%node(this%range(1, i):this%range(2, i)) = i
      end do
      ! Report the distribution when a tag is given
      if (present(tag)) then
        message(1) = 'Info: Parallelization in ' // trim(tag)
        call messages_info(1)

        do i = 0, this%mpi_grp%size - 1
          write(message(1), '(a,i4,a,i6,a)') 'Info: Node in group ', i, &
            ' will manage ', this%num(i), ' '//trim(tag)
          if (this%num(i) > 0) then
            write(message(1), '(a,a,i6,a,i6)') trim(message(1)), ':', this%range(1, i), " - ", this%range(2, i)
          end if
          call messages_info(1)
        end do
      end if
    end if

    ! ...
  end subroutine distributed_init
  !> @brief Create a copy of a distributed instance
  subroutine distributed_copy(in, out)
    ! ...
    out%nlocal   = in%nlocal
    out%nglobal  = in%nglobal
    out%parallel = in%parallel
    ! ...
    size = in%mpi_grp%size
    ! ...
    if (allocated(in%node)) then
      SAFE_ALLOCATE(out%node(1:in%nglobal))
      out%node(1:in%nglobal) = in%node(1:in%nglobal)
    end if

    if (allocated(in%range)) then
      SAFE_ALLOCATE(out%range(1:2, 0:size - 1))
      out%range(1:2, 0:size - 1) = in%range(1:2, 0:size - 1)
    end if

    if (allocated(in%num)) then
      SAFE_ALLOCATE(out%num(0:size - 1))
      out%num(0:size - 1) = in%num(0:size - 1)
    end if

  end subroutine distributed_copy
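  ! Hedged usage sketch (hypothetical caller, not part of this module): the copy is
  ! deep, i.e. out gets freshly allocated node/range/num arrays, so the two objects
  ! can be modified and finalized independently:
  !   call distributed_copy(dist, dist_backup)
  !   ! ... use or modify dist_backup ...
  !   call distributed_end(dist_backup)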
  subroutine distributed_end(this)
    ! ...
    SAFE_DEALLOCATE_A(this%node)
    SAFE_DEALLOCATE_A(this%range)
    SAFE_DEALLOCATE_A(this%num)
  end subroutine distributed_end
  !> Gather the distributed pieces of aa, so that afterwards every process holds the full array
  subroutine distributed_allgather(this, aa)
    type(distributed_t),      intent(in)    :: this
    real(real64), contiguous, intent(inout) :: aa(:)

    integer, allocatable :: displs(:)

    if (.not. this%parallel) return

    SAFE_ALLOCATE(displs(0:this%mpi_grp%size - 1))

    ! Displacement of each rank's block: its first global index, zero-based
    displs(0:this%mpi_grp%size - 1) = this%range(1, 0:this%mpi_grp%size - 1) - 1

    ! In-place all-gather: each rank contributes its nlocal elements and receives
    ! the other blocks at the proper offsets
    call mpi_allgatherv(mpi_in_place, this%nlocal, mpi_double_precision, &
      aa(1), this%num, displs, mpi_double_precision, this%mpi_grp%comm, mpi_err)

    SAFE_DEALLOCATE_A(displs)

  end subroutine distributed_allgather
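! Standalone sketch of the same in-place allgather pattern (illustrative, not part
! of Octopus; assumes an MPI library providing the mpi_f08 module). Each rank fills
! its own block of a length-10 array, then MPI_Allgatherv with MPI_IN_PLACE
! replicates the complete array on every rank. num and displs mirror this%num and
! this%range(1, :) - 1 above.
program allgatherv_sketch
  use mpi_f08
  implicit none
  integer, parameter :: nglobal = 10
  integer :: nprocs, rank, base, rest, i, first, last
  integer, allocatable :: num(:), displs(:)
  real(8) :: aa(nglobal)

  call MPI_Init()
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank)

  allocate(num(0:nprocs - 1), displs(0:nprocs - 1))

  ! Even block split: counts and zero-based displacements per rank
  base = nglobal / nprocs
  rest = mod(nglobal, nprocs)
  do i = 0, nprocs - 1
    num(i) = base
    if (i < rest) num(i) = num(i) + 1
    if (i == 0) then
      displs(i) = 0
    else
      displs(i) = displs(i - 1) + num(i - 1)
    end if
  end do

  ! Fill only the locally owned block [first, last]
  first = displs(rank) + 1
  last  = displs(rank) + num(rank)
  aa = 0.0d0
  aa(first:last) = real(rank, 8)

  ! In-place gather: afterwards every rank holds the full array
  call MPI_Allgatherv(MPI_IN_PLACE, num(rank), MPI_DOUBLE_PRECISION, &
    aa, num, displs, MPI_DOUBLE_PRECISION, MPI_COMM_WORLD)

  if (rank == 0) print '(a,10f5.1)', 'gathered: ', aa

  call MPI_Finalize()
end program allgatherv_sketch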
! Referenced interfaces from other Octopus modules (cross-references of this listing):
!   subroutine mpi_grp_init(grp, comm)
!     Initialize MPI group instance.
!   integer, public :: mpi_err
!     Used to store return values of MPI calls.
!   type(MPI_Comm), parameter, public :: mpi_comm_undefined
!     Used to indicate a communicator has not been initialized.
!   character(len=256), dimension(max_lines), public :: message
!     Lines to be output by fatal, warning.
!   subroutine, public :: messages_info(no_lines, iunit, debug_only, stress, all_nodes, namespace)
!   subroutine, public :: multicomm_divide_range(nobjs, nprocs, istart, ifinal, lsize, scalapack_compat)
!     Divide the range of numbers [1, nobjs] between nprocs processors. Provided by
!     the module that handles the communicators for the various parallelization strategies.