Octopus
multicomm.F90
1!! Copyright (C) 2002-2015 M. Marques, A. Castro, A. Rubio, G. Bertsch, X. Andrade
2!!
3!! This program is free software; you can redistribute it and/or modify
4!! it under the terms of the GNU General Public License as published by
5!! the Free Software Foundation; either version 2, or (at your option)
6!! any later version.
7!!
8!! This program is distributed in the hope that it will be useful,
9!! but WITHOUT ANY WARRANTY; without even the implied warranty of
10!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11!! GNU General Public License for more details.
12!!
13!! You should have received a copy of the GNU General Public License
14!! along with this program; if not, write to the Free Software
15!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16!! 02110-1301, USA.
17!!
18
19#include "global.h"
20
51
52module multicomm_oct_m
53 use blacs_oct_m
55 use debug_oct_m
56 use global_oct_m
57 use, intrinsic :: iso_fortran_env
59 use mpi_oct_m
62#if defined(HAVE_OPENMP)
63 use omp_lib
64#endif
65 use parser_oct_m
67 use utils_oct_m
68
69 implicit none
70
71 private
72
73 public :: &
76#if defined(HAVE_MPI)
78#endif
87
88 integer, public, parameter :: &
89 P_MASTER = 1, &
90 p_slave = 2
91
92 integer, public, parameter :: &
93 PAR_AUTO = -1, &
94 par_no = 0
95
96 integer, parameter :: n_par_types = 4
97 character(len=11), parameter :: par_types(0:n_par_types) = &
98 (/ &
99 "serial ", &
100 "ParDomains", &
101 "ParStates ", &
102 "ParKPoints", &
103 "ParOther " &
104 /)
105
106 integer, parameter :: MAX_INDEX = 5
107
113 type multicomm_t
114 private
115 integer :: n_node
116
117 integer, public :: par_strategy
118
119 integer, allocatable :: group_sizes(:)
120 integer, allocatable, public :: who_am_i(:)
121 type(MPI_Comm), allocatable, public :: group_comm(:)
122 type(MPI_Comm), public :: dom_st_comm
123 type(MPI_Comm), public :: st_kpt_comm
124 type(MPI_Comm), public :: dom_st_kpt_comm
125 type(mpi_grp_t), public :: intranode_grp
126 type(mpi_grp_t), public :: internode_grp
127
128 integer :: nthreads
129 integer :: node_type
130 logical :: have_slaves
131
132 type(MPI_Comm) :: full_comm
133 integer :: full_comm_rank
134 type(MPI_Comm), public :: master_comm
135 integer :: master_comm_rank
136 type(MPI_Comm), public :: slave_intercomm
137
138 logical :: reorder_ranks
139 end type multicomm_t
140
141 !> An all-pairs communication schedule for a given group.
142 type multicomm_all_pairs_t
143 private
144 type(mpi_grp_t) :: grp
145 integer :: rounds
146 integer, allocatable, public :: schedule(:, :)
147 end type multicomm_all_pairs_t
148
149contains
150
151 ! ---------------------------------------------------------
152 subroutine multicomm_all_pairs_copy(apout, apin)
153 type(multicomm_all_pairs_t), intent(inout) :: apout
154 type(multicomm_all_pairs_t), intent(in) :: apin
155
157
158 call mpi_grp_copy(apout%grp, apin%grp)
159 apout%rounds = apin%rounds
160 if (allocated(apin%schedule)) then
161 safe_allocate(apout%schedule(1:size(apin%schedule, 1), 1:size(apin%schedule, 2)))
162 apout%schedule = apin%schedule
163 end if
164
166 end subroutine multicomm_all_pairs_copy
167
168 ! ---------------------------------------------------------
169 !> create index and domain communicators
170 subroutine multicomm_init(mc, namespace, base_grp, mode_para, n_node, index_range, min_range)
171 type(multicomm_t), intent(out) :: mc
172 type(namespace_t), intent(in) :: namespace
173 type(mpi_grp_t), intent(in) :: base_grp
174 type(calc_mode_par_t), intent(in) :: mode_para
175 integer, intent(in) :: n_node
176 integer(int64), intent(inout) :: index_range(:)
177 integer, intent(in) :: min_range(:)
178
179 integer :: ii, num_slaves, slave_level, ipar
180 integer :: parse(1:P_STRATEGY_MAX), default(1:P_STRATEGY_MAX)
181 integer :: parallel_mask, default_mask
182
183 push_sub(multicomm_init)
184
185 mc%n_node = n_node
186 parallel_mask = mode_para%parallel_mask()
187 default_mask = mode_para%default_parallel_mask()
188
189 call messages_print_with_emphasis(msg="Parallelization", namespace=namespace)
191 !%Variable ReorderRanks
192 !%Default no
193 !%Type logical
194 !%Section Execution::Parallelization
195 !%Description
196 !% This variable controls whether the ranks are reordered to obtain a more
197 !% compact distribution with respect to domain parallelization, which needs
198 !% to communicate most often. Depending on the system, this can improve
199 !% communication speeds.
200 !%End
201 call parse_variable(namespace, 'ReorderRanks', .false., mc%reorder_ranks)
202
203 call messages_obsolete_variable(namespace, 'ParallelizationStrategy')
204 call messages_obsolete_variable(namespace, 'ParallelizationGroupRanks')
205
206 do ipar = 1, p_strategy_max
207 default(ipar) = par_no
208 if (bitand(default_mask, ibset(0, ipar - 1)) /= 0) then
209 default(ipar) = par_auto
210 end if
211 end do
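The loop above derives the per-strategy defaults from the calculation mode's bit mask: strategy ipar is enabled by default whenever bit ipar-1 of the mask is set. A minimal standalone sketch of the same test follows; it assumes that bitand is a preprocessor alias for the Fortran intrinsic iand (presumably provided via global.h), that the p_strategy_* constants simply number the four strategies 1 to 4, and it uses a purely hypothetical mask value.

program strategy_bits
  implicit none
  integer :: default_mask, ipar
  ! hypothetical mask with bits 0 and 2 set (strategies 1 and 3 enabled by default)
  default_mask = ibset(ibset(0, 0), 2)
  do ipar = 1, 4
    if (iand(default_mask, ibset(0, ipar - 1)) /= 0) then
      print '(a,i0,a)', 'strategy ', ipar, ': default is PAR_AUTO'
    else
      print '(a,i0,a)', 'strategy ', ipar, ': default is PAR_NO'
    end if
  end do
end program strategy_bits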
213 !%Variable ParDomains
214 !%Type integer
215 !%Default auto
216 !%Section Execution::Parallelization
217 !%Description
218 !% This variable controls the number of processors used for the
219 !% parallelization in domains.
220 !% The special value <tt>auto</tt>, the default, lets Octopus
221 !% decide how many processors will be assigned for this
222 !% strategy. To disable parallelization in domains, you can use
223 !% <tt>ParDomains = no</tt> (or set the number of processors to
224 !% 1).
225 !%
226 !% The total number of processors required is the product of the
227 !% numbers of processors assigned to each parallelization strategy.
228 !%Option auto -1
229 !% The number of processors is assigned automatically.
230 !%Option no 0
231 !% This parallelization strategy is not used.
232 !%End
233 call parse_variable(namespace, 'ParDomains', default(p_strategy_domains), parse(p_strategy_domains))
234
235 !%Variable ParStates
236 !%Type integer
237 !%Section Execution::Parallelization
238 !%Description
239 !% This variable controls the number of processors used for the
240 !% parallelization in states. The special value <tt>auto</tt> lets
241 !% Octopus decide how many processors will be assigned for this
242 !% strategy. To disable parallelization in states, you can use
243 !% <tt>ParStates = no</tt> (or set the number of processors to 1).
244 !%
245 !% The default value depends on the <tt>CalculationMode</tt>. For
246 !% <tt>CalculationMode = td</tt> the default is <tt>auto</tt>, while
247 !% for other modes the default is <tt>no</tt>.
248 !%
249 !% The total number of processors required is the product of the
250 !% numbers of processors assigned to each parallelization strategy.
251 !%Option auto -1
252 !% The number of processors is assigned automatically.
253 !%Option no 0
254 !% This parallelization strategy is not used.
255 !%End
256 call parse_variable(namespace, 'ParStates', default(p_strategy_states), parse(p_strategy_states))
257
258 !%Variable ParKPoints
259 !%Type integer
260 !%Default auto
261 !%Section Execution::Parallelization
262 !%Description
263 !% This variable controls the number of processors used for the
264 !% parallelization in K-Points and/or spin.
265 !% The special value <tt>auto</tt> lets Octopus decide how many processors will be
266 !% assigned for this strategy. To disable parallelization in
267 !% KPoints, you can use <tt>ParKPoints = no</tt> (or set the
268 !% number of processors to 1).
269 !%
270 !% The total number of processors required is the product of the
271 !% numbers of processors assigned to each parallelization strategy.
272 !%Option auto -1
273 !% The number of processors is assigned automatically.
274 !%Option no 0
275 !% This parallelization strategy is not used.
276 !%End
277 call parse_variable(namespace, 'ParKPoints', default(p_strategy_kpoints), parse(p_strategy_kpoints))
278
279 !%Variable ParOther
280 !%Type integer
281 !%Default auto
282 !%Section Execution::Parallelization
283 !%Description
284 !% This variable controls the number of processors used for the
285 !% 'other' parallelization mode, which depends on the
286 !% <tt>CalculationMode</tt>. For <tt>CalculationMode = casida</tt>, it means
287 !% parallelization in electron-hole pairs.
288 !%
289 !% The special value <tt>auto</tt>,
290 !% the default, lets Octopus decide how many processors will be
291 !% assigned for this strategy. To disable parallelization in
292 !% Other, you can use <tt>ParOther = no</tt> (or set the
293 !% number of processors to 1).
294 !%
295 !% The total number of processors required is the product of the
296 !% numbers of processors assigned to each parallelization strategy.
297 !%Option auto -1
298 !% The number of processors is assigned automatically.
299 !%Option no 0
300 !% This parallelization strategy is not used.
301 !%End
302 call parse_variable(namespace, 'ParOther', default(p_strategy_other), parse(p_strategy_other))
303
304 do ipar = 1, p_strategy_max
305 if (parse(ipar) == par_no) parse(ipar) = 1
306 end do
307
308 call strategy()
309
310 mc%have_slaves = .false.
311
312 if (mc%par_strategy /= p_strategy_serial) then
313 safe_allocate(mc%group_sizes(1:p_strategy_max))
314
315 mc%group_sizes = 1
316
317 do ipar = 1, p_strategy_max
318 if (multicomm_strategy_is_parallel(mc, ipar)) then
319 mc%group_sizes(ipar) = parse(ipar)
320 else if (parse(ipar) /= 1) then
321 call messages_write('Ignoring specification for ' // par_types(ipar))
322 call messages_new_line()
323 call messages_write('This parallelization strategy is not available.')
324 call messages_warning()
325 end if
326 end do
327
328 call assign_nodes()
329
330
331 !%Variable ParallelizationNumberSlaves
332 !%Type integer
333 !%Default 0
334 !%Section Execution::Parallelization
335 !%Description
336 !% Slaves are nodes used for task parallelization. The number of
337 !% such nodes is given by this variable multiplied by the number
338 !% of domains used in domain parallelization.
339 !%End
340 call parse_variable(namespace, 'ParallelizationNumberSlaves', 0, num_slaves)
341
342 ! The slaves must be defined at a certain parallelization level; for the moment this is state parallelization.
343 slave_level = p_strategy_states
344 mc%have_slaves = (num_slaves > 0)
345
346 if (mc%have_slaves) then
347 call messages_experimental('Task parallelization')
348 end if
349
350 ! clear parallel strategies that were available but will not be used
351 do ii = 1, p_strategy_max
352 if (mc%group_sizes(ii) == 1) mc%par_strategy = ibclr(mc%par_strategy, ii - 1)
353 end do
354
355 ! reset
356 call sanity_check()
357 end if
358
359 call group_comm_create()
360
361 call messages_print_with_emphasis(namespace=namespace)
362
363 pop_sub(multicomm_init)
364
365 contains
366
367 ! ---------------------------------------------------------
368 subroutine strategy()
369 integer :: jj, ipar
370
371 push_sub(multicomm_init.strategy)
372
373 if (base_grp%size > 1) then
374
375 mc%par_strategy = 0
376
377 do ipar = 1, p_strategy_max
378 if (parse(ipar) == par_auto .or. parse(ipar) > 1) then
379 mc%par_strategy = ibset(mc%par_strategy, ipar - 1)
380 end if
381 end do
382
383 if (mc%par_strategy /= bitand(mc%par_strategy, parallel_mask)) then
384 call messages_write('Parallelization strategies unavailable for this run mode are being discarded.')
385 call messages_warning()
386 end if
387
388 mc%par_strategy = bitand(mc%par_strategy, parallel_mask)
389
390 if (mc%par_strategy == p_strategy_serial) then
391 message(1) = "More than one node is available, but this run mode cannot run with the requested parallelization."
392 message(2) = "Please select a parallelization strategy compatible with"
393 jj = 2
394 do ii = 1, n_par_types
395 if (bitand(parallel_mask, 2**(ii - 1)) /= 0) then
396 jj = jj + 1
397 write(message(jj), '(2a)') " -> ", par_types(ii)
398 end if
399 end do
400 jj = jj + 1
401 write(message(jj),'(a,i6)') "mc%par_strategy is : ", mc%par_strategy
402 call messages_fatal(jj, only_root_writes = .true.)
403 end if
404 else
405 mc%par_strategy = p_strategy_serial
406 end if
407
408 mc%nthreads = 1
409#if defined(HAVE_OPENMP)
410 !$omp parallel
411 !$omp master
412 mc%nthreads = omp_get_num_threads()
413 !$omp end master
414 !$omp end parallel
415#endif
416
417 if (mc%par_strategy == p_strategy_serial .and. mc%nthreads == 1) then
418 message(1) = "Info: Octopus will run in *serial*"
419 call messages_info(1, namespace=namespace)
420 else
421 write(message(1),'(a)') 'Info: Octopus will run in *parallel*'
422 write(message(2),'(a)') ''
423 write(message(3),'(a, i8)') ' Number of processes :', base_grp%size
424 write(message(4),'(a, i8)') ' Number of threads per process :', mc%nthreads
425 write(message(5),'(a)') ''
426 call messages_info(5, namespace=namespace)
427 end if
428
429 pop_sub(multicomm_init.strategy)
430 end subroutine strategy
431
432 ! ---------------------------------------------------------
433
434 subroutine assign_nodes()
435 integer :: ii, nn, kk, n_divisors, divisors(1:50)
436 integer(int64) :: n_group_max(1:p_strategy_max)
437
439
440 ! this is the maximum number of processors in each group
441 n_group_max(1:p_strategy_max) = max(index_range(1:p_strategy_max), 1)
442 do kk = 1, p_strategy_max
443 if (.not. multicomm_strategy_is_parallel(mc, kk)) n_group_max(kk) = 1
444 end do
445
446 if (debug%info) then
447 call messages_write('Debug info: Allowable group ranks:', new_line = .true.)
448 do kk = 1, p_strategy_max
449 call messages_write(par_types(kk), fmt = '2x,a12,":",1x')
450 call messages_write(n_group_max(kk), new_line = .true.)
451 end do
452 call messages_info()
453 end if
454
455 nn = mc%n_node
456
457 ! first loop, check the processors assigned by the user
458 do ipar = p_strategy_max, 1, -1
459
460 if (mc%group_sizes(ipar) == par_auto) cycle
462 if (mc%group_sizes(ipar) > n_group_max(ipar)) then
463 call messages_write('The number of processors specified for '//par_types(ipar)//'(')
464 call messages_write(mc%group_sizes(ipar))
465 call messages_write(')', new_line = .true.)
466 call messages_write('is larger than the degrees of freedom for that level (')
467 call messages_write(n_group_max(ipar))
468 call messages_write(').')
469 call messages_warning()
470 end if
471
472 if (mod(nn, mc%group_sizes(ipar)) /= 0) then
473 call messages_write('The number of processors specified for '//par_types(ipar)//'(')
474 call messages_write(mc%group_sizes(ipar))
475 call messages_write(')', new_line = .true.)
476 call messages_write('is not a divisor of the number of processors (')
477 call messages_write(mc%n_node)
478 call messages_write(').')
479 call messages_fatal()
480 end if
481
482 nn = nn/mc%group_sizes(ipar)
483
484 end do
485
486 ! second loop, now assign the rest automatically
487 do ipar = p_strategy_max, 1, -1
488
489 if (mc%group_sizes(ipar) /= par_auto) cycle
490
491 n_divisors = ubound(divisors, dim = 1)
492 call get_divisors(nn, n_divisors, divisors)
493
494 mc%group_sizes(ipar) = nn
495 do ii = 2, n_divisors
496 if (divisors(ii) > n_group_max(ipar)) then
497 mc%group_sizes(ipar) = divisors(ii - 1)
498 exit
499 end if
500 end do
501
502 nn = nn/mc%group_sizes(ipar)
503
504 end do
505
507 end subroutine assign_nodes
508
509
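For levels left on auto, assign_nodes picks the largest divisor of the still-unassigned process count that does not exceed the degrees of freedom at that level. The standalone sketch below reproduces this selection rule with hypothetical numbers (12 unassigned processes, at most 5 degrees of freedom) and a plain divisor loop standing in for get_divisors; it prints 4.

program pick_group_size
  implicit none
  integer :: nn, n_group_max, ii, n_divisors, chosen
  integer :: divisors(50)

  nn = 12           ! processes not yet assigned
  n_group_max = 5   ! degrees of freedom at this level
  ! collect the divisors of nn in increasing order (stand-in for get_divisors)
  n_divisors = 0
  do ii = 1, nn
    if (mod(nn, ii) == 0) then
      n_divisors = n_divisors + 1
      divisors(n_divisors) = ii
    end if
  end do
  ! same rule as assign_nodes: largest divisor not exceeding n_group_max
  chosen = nn
  do ii = 2, n_divisors
    if (divisors(ii) > n_group_max) then
      chosen = divisors(ii - 1)
      exit
    end if
  end do
  print '(a,i0)', 'group size chosen: ', chosen
end program pick_group_size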
510 ! ---------------------------------------------------------
511 !> check if a balanced distribution of nodes will be used
512 subroutine sanity_check()
513 real(real64) :: frac
514 integer :: ii, kk
515 integer(int64) :: jj, n_max
516 integer :: real_group_sizes(1:MAX_INDEX)
517
519
520 if (num_slaves > 0) then
521
522 if (mc%group_sizes(slave_level) < num_slaves + 1) then
523 message(1) = 'Too many nodes assigned to task parallelization.'
524 call messages_fatal(1)
525 end if
526
527 write(message(1),'(a,i6)') 'Info: Number of slave nodes :', &
528 num_slaves*product(mc%group_sizes(1:slave_level - 1))
529 call messages_info(1)
530
531 end if
532
533 ! print out some info
534 ii = 0
535 do kk = p_strategy_max, 1, -1
536 real_group_sizes(kk) = mc%group_sizes(kk)
537 if (.not. multicomm_strategy_is_parallel(mc, kk)) cycle
538 ii = ii + 1
539 if (kk == slave_level) real_group_sizes(kk) = real_group_sizes(kk) - num_slaves
540 write(message(ii),'(3a,i6,a,i12,a)') 'Info: Number of nodes in ', &
541 par_types(kk), ' group:', real_group_sizes(kk), ' (', index_range(kk), ')'
542 end do
543 call messages_info(ii)
544
545 ! do we have the correct number of processors
546 if (product(mc%group_sizes(1:p_strategy_max)) /= base_grp%size) then
547 write(message(1),'(a)') 'Inconsistent number of processors:'
548 write(message(2),'(a,i6)') ' MPI processes = ', base_grp%size
549 write(message(3),'(a,i6)') ' Required processes = ', product(mc%group_sizes(1:p_strategy_max))
550 message(4) = ''
551 message(5) = 'You probably have a problem in ParDomains, ParStates, ParKPoints, or ParOther.'
552 call messages_fatal(5, only_root_writes = .true.)
553 end if
554
555 if (any(real_group_sizes(1:p_strategy_max) > index_range(1:p_strategy_max))) then
556 message(1) = "Could not distribute nodes in parallel job. Most likely you are trying to"
557 message(2) = "use too many nodes for the job."
558 call messages_fatal(2, only_root_writes = .true.)
559 end if
560
561 if (any(index_range(1:p_strategy_max) / real_group_sizes(1:p_strategy_max) < min_range(1:p_strategy_max) .and. &
562 real_group_sizes(1:p_strategy_max) > 1)) then
563 message(1) = "I have fewer elements in a parallel group than recommended."
564 message(2) = "Maybe you should reduce the number of nodes."
565 call messages_warning(2)
566 end if
567
568 ! calculate fraction of idle time
569 frac = m_one
570 do ii = 1, p_strategy_max
571 n_max = ceiling(real(index_range(ii), real64) / real(real_group_sizes(ii)), real64)
572 jj = n_max*real_group_sizes(ii)
573 frac = frac*(m_one - real(jj - index_range(ii), real64) / real(jj, real64) )
574 end do
575
576 write(message(1), '(a,f5.2,a)') "Info: Octopus will waste at least ", &
577 (m_one - frac)*100.0_real64, "% of computer time."
578 if (frac < 0.8_real64) then
579 message(2) = "Usually a number of processors that is a product of small primes works best."
580 call messages_warning(2)
581 else
582 call messages_info(1)
583 end if
584
586 end subroutine sanity_check
587
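The idle-time estimate in sanity_check multiplies, over all levels, the filled fraction of a padded block: with n_max = ceiling(index_range/group_size) objects per process, only index_range of the n_max*group_size slots do useful work. The sketch below evaluates the same product for a hypothetical two-level job (10 k-points over 4 processes, 7 states over 2 processes) and reports roughly 27% idle time.

program idle_fraction
  use, intrinsic :: iso_fortran_env, only: real64
  implicit none
  integer :: index_range(2), group_size(2), ii, n_max, jj
  real(real64) :: frac

  index_range = [10, 7]   ! hypothetical degrees of freedom per level
  group_size  = [4, 2]    ! hypothetical processes per level
  frac = 1.0_real64
  do ii = 1, 2
    n_max = ceiling(real(index_range(ii), real64) / real(group_size(ii), real64))
    jj = n_max * group_size(ii)
    frac = frac * (1.0_real64 - real(jj - index_range(ii), real64) / real(jj, real64))
  end do
  print '(a,f6.2,a)', 'at least ', (1.0_real64 - frac) * 100.0_real64, '% of computer time is wasted'
end program idle_fraction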
588 ! ---------------------------------------------------------
589 subroutine group_comm_create()
590#if defined(HAVE_MPI)
591 logical :: dim_mask(MAX_INDEX)
592 integer :: i_strategy, irank
593 logical :: reorder, periodic_mask(MAX_INDEX)
594 integer :: coords(MAX_INDEX)
595 type(mpi_comm) :: new_comm
596 integer :: new_comm_size
597 character(len=6) :: node_type
598 type(mpi_grp_t) :: reorder_grp
599 type(mpi_group) :: base_group, reorder_group
600 integer :: ranks(base_grp%size)
601 integer :: ii, jj, kk, ll, nn
602 type(mpi_comm) :: reorder_comm
603#endif
604
606
607 mc%node_type = p_master
608
609 safe_allocate(mc%group_comm(1:p_strategy_max))
610 safe_allocate(mc%who_am_i(1:p_strategy_max))
611
612#if defined(HAVE_MPI)
613 mc%full_comm = mpi_comm_null
614 mc%slave_intercomm = mpi_comm_null
615 if (mc%par_strategy /= p_strategy_serial) then
616 if (mc%reorder_ranks) then
617 ! first, reorder the ranks
618 ! this is done to get a column-major ordering of the ranks in the
619 ! Cartesian communicator, since they are ordered row-major otherwise
620 call mpi_comm_group(base_grp%comm, base_group, mpi_err)
621 if (mpi_err /= mpi_success) then
622 message(1) = "Error in getting MPI group!"
623 call messages_fatal(1)
624 end if
625 ! now transpose the hypercube => get rank numbers in column-major order
626 nn = 1
627 do ii = 1, mc%group_sizes(1)
628 do jj = 1, mc%group_sizes(2)
629 do kk = 1, mc%group_sizes(3)
630 do ll = 1, mc%group_sizes(4)
631 ranks(nn) = (ll-1)*mc%group_sizes(3)*mc%group_sizes(2)*mc%group_sizes(1) &
632 + (kk-1)*mc%group_sizes(2)*mc%group_sizes(1) &
633 + (jj-1)*mc%group_sizes(1) + ii - 1
634 nn = nn + 1
635 end do
636 end do
637 end do
638 end do
639 call mpi_group_incl(base_group, base_grp%size, ranks, reorder_group, mpi_err)
640 if (mpi_err /= mpi_success) then
641 message(1) = "Error in creating MPI group!"
642 call messages_fatal(1)
643 end if
644 ! now get the reordered communicator
645 call mpi_comm_create(base_grp%comm, reorder_group, reorder_comm, mpi_err)
646 if (mpi_err /= mpi_success) then
647 message(1) = "Error in creating reordered communicator!"
648 call messages_fatal(1)
649 end if
650 call mpi_grp_init(reorder_grp, reorder_comm)
651 else
652 call mpi_grp_copy(reorder_grp, base_grp)
653 end if
654
655 ! Multilevel parallelization is organized in a hypercube. We
656 ! use an MPI Cartesian topology to generate the communicators
657 ! that correspond to each level.
658
659 ! create the topology
660 periodic_mask = .false.
661 reorder = .true.
662
663 ! The domain and states dimensions have to be periodic (2D torus)
664 ! in order to circulate matrix blocks.
667
668 ! We allow reordering of ranks.
669 call mpi_cart_create(reorder_grp%comm, p_strategy_max, mc%group_sizes, periodic_mask, reorder, mc%full_comm, mpi_err)
670
671 call mpi_comm_rank(mc%full_comm, mc%full_comm_rank, mpi_err)
672
673 ! get the coordinates of the current processor
674 call mpi_cart_coords(mc%full_comm, mc%full_comm_rank, p_strategy_max, coords, mpi_err)
676 ! find out what type of node this is
677 if (coords(slave_level) >= mc%group_sizes(slave_level) - num_slaves) then
678 mc%node_type = p_slave
679 end if
680
681 if (mc%node_type == p_master) then
682 mc%group_sizes(slave_level) = mc%group_sizes(slave_level) - num_slaves
683 else
684 mc%group_sizes(slave_level) = num_slaves
685 end if
686
687 call mpi_comm_split(mc%full_comm, mc%node_type, mc%full_comm_rank, new_comm, mpi_err)
688 assert(new_comm /= mpi_comm_null)
689 call mpi_comm_size(new_comm, new_comm_size, mpi_err)
690
691 reorder = .false.
692 if (product(mc%group_sizes(:)) /= new_comm_size) then
693 write(stderr,*) 'node ', mpi_world%rank, ': mc%group_sizes = ', mc%group_sizes, ' new_comm_size = ', new_comm_size
694 call mpi_world%barrier()
695 assert(product(mc%group_sizes(:)) == new_comm_size)
696 end if
697 call mpi_cart_create(new_comm, p_strategy_max, mc%group_sizes, periodic_mask, reorder, mc%master_comm, mpi_err)
698 assert(mc%master_comm /= mpi_comm_null)
699
700 call mpi_comm_free(new_comm, mpi_err)
701
702 call mpi_comm_rank(mc%master_comm, mc%master_comm_rank, mpi_err)
703
704 ! The "lines" of the Cartesian grid.
705 ! Initialize all the communicators, even if they are not parallelized
706 do i_strategy = 1, p_strategy_max
707 dim_mask = .false.
708 dim_mask(i_strategy) = .true.
709 call mpi_cart_sub(mc%master_comm, dim_mask, mc%group_comm(i_strategy), mpi_err)
710 call mpi_comm_rank(mc%group_comm(i_strategy), mc%who_am_i(i_strategy), mpi_err)
711 end do
712
713 ! The domain-state "planes" of the grid (the ones with periodic dimensions).
714 dim_mask = .false.
715 dim_mask(p_strategy_domains) = .true.
716 dim_mask(p_strategy_states) = .true.
717 call mpi_cart_sub(mc%master_comm, dim_mask, mc%dom_st_comm, mpi_err)
718
719 ! The state-kpoints "planes" of the grid
720 dim_mask = .false.
721 dim_mask(p_strategy_states) = .true.
722 dim_mask(p_strategy_kpoints) = .true.
723 call mpi_cart_sub(mc%master_comm, dim_mask, mc%st_kpt_comm, mpi_err)
724
725 ! The domains-states-kpoints "cubes" of the grid
726 dim_mask = .false.
727 dim_mask(p_strategy_domains) = .true.
728 dim_mask(p_strategy_states) = .true.
729 dim_mask(p_strategy_kpoints) = .true.
730 call mpi_cart_sub(mc%master_comm, dim_mask, mc%dom_st_kpt_comm, mpi_err)
731
732 if (num_slaves > 0) call create_slave_intercommunicators()
733
734 call create_intranode_communicator(base_grp, mc%intranode_grp, mc%internode_grp)
735 else
736 ! we initialize these communicators so we can use them even in serial
737 mc%group_comm = base_grp%comm
738 mc%who_am_i = 0
739 mc%master_comm = base_grp%comm
740 mc%dom_st_comm = base_grp%comm
741 mc%st_kpt_comm = base_grp%comm
742 mc%dom_st_kpt_comm = base_grp%comm
743 call mpi_grp_copy(mc%intranode_grp, base_grp)
744 call mpi_grp_copy(mc%internode_grp, base_grp)
745 end if
746
747 ! This is temporary debugging information.
748 if (debug%info .and. mc%par_strategy /= p_strategy_serial) then
749 write(message(1),'(a)') 'Debug: MPI Task Assignment to MPI Groups'
750 write(message(2),'(5a10)') 'World', 'Domains', 'States', 'K-Points', 'Other'
751 call messages_info(2)
752
753 if (mc%node_type == p_slave) then
754 node_type = "slave"
755 else
756 node_type = "master"
757 end if
758 do irank = 0, mpi_world%size - 1
759 if (mpi_world%rank == irank) then
760 write(message(1),'(5i10,5x,a)') mpi_world%rank, mc%who_am_i(p_strategy_domains), mc%who_am_i(p_strategy_states), &
761 mc%who_am_i(p_strategy_kpoints), mc%who_am_i(p_strategy_other), trim(node_type)
762 call messages_info(1, all_nodes = .true.)
763 end if
764 call mpi_world%barrier()
765 end do
766 end if
767
768#else
769 mc%group_comm = mpi_comm_undefined
770 mc%who_am_i = 0
771 mc%full_comm = mpi_comm_undefined
772 mc%master_comm = mpi_comm_undefined
773 mc%dom_st_comm = mpi_comm_undefined
774 mc%st_kpt_comm = mpi_comm_undefined
775 mc%dom_st_kpt_comm = mpi_comm_undefined
776 mc%slave_intercomm = mpi_comm_undefined
777#endif
778
780 end subroutine group_comm_create
781
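group_comm_create arranges the multilevel parallelization as a hypercube: one Cartesian communicator over all levels, from which the per-level "lines" and the mixed "planes" are extracted with mpi_cart_sub. The program below is a minimal, self-contained illustration of that pattern, not Octopus code; it assumes the standard mpi_f08 module and a hypothetical 2x3 grid, so it should be run with at least 6 MPI processes.

program cart_demo
  use mpi_f08
  implicit none
  type(MPI_Comm) :: cart_comm, row_comm
  integer :: dims(2), my_rank, coords(2)
  logical :: periods(2), remain(2)

  call MPI_Init()
  dims = [2, 3]
  periods = .false.
  call MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, .true., cart_comm)
  if (cart_comm /= MPI_COMM_NULL) then
    call MPI_Comm_rank(cart_comm, my_rank)
    call MPI_Cart_coords(cart_comm, my_rank, 2, coords)
    ! keep only the second dimension: one communicator per grid row,
    ! analogous to the mpi_cart_sub calls in group_comm_create
    remain = [.false., .true.]
    call MPI_Cart_sub(cart_comm, remain, row_comm)
    print '(a,i0,a,2i3)', 'rank ', my_rank, ' has grid coordinates', coords
  end if
  call MPI_Finalize()
end program cart_demo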
782 ! -----------------------------------------------------
783#ifdef HAVE_MPI
784 subroutine create_slave_intercommunicators()
785 integer :: remote_leader
786 integer :: tag
787 integer :: coords(MAX_INDEX)
788
790
791 ! create the intercommunicators to communicate with slaves
792
793 ! get the coordinates of the current processor
794 call mpi_cart_coords(mc%full_comm, mc%full_comm_rank, p_strategy_max, coords, mpi_err)
795
796 ! now get the rank of the remote_leader
797 if (mc%node_type == p_slave) then
798 coords(slave_level) = 0
799 else
800 coords(slave_level) = mc%group_sizes(slave_level)
801 end if
802 call mpi_cart_rank(mc%full_comm, coords, remote_leader, mpi_err)
803
804 ! now create the intercommunicator
805 tag = coords(p_strategy_domains)
806 call mpi_intercomm_create(mc%group_comm(slave_level), 0, base_grp%comm, remote_leader, tag, mc%slave_intercomm, mpi_err)
807
809 end subroutine create_slave_intercommunicators
810#endif
811 end subroutine multicomm_init
812
813
814 ! ---------------------------------------------------------
815 subroutine multicomm_end(mc)
816 type(multicomm_t), intent(inout) :: mc
817
818#if defined(HAVE_MPI)
819 integer :: ii
820#endif
821
822 push_sub(multicomm_end)
823
824 if (mc%par_strategy /= p_strategy_serial) then
825#if defined(HAVE_MPI)
826 ! Delete communicators.
827 do ii = 1, p_strategy_max
828 ! initialized even if not parallelized
829 call mpi_comm_free(mc%group_comm(ii), mpi_err)
830 end do
831 call mpi_comm_free(mc%dom_st_comm, mpi_err)
832 call mpi_comm_free(mc%st_kpt_comm, mpi_err)
833 call mpi_comm_free(mc%dom_st_kpt_comm, mpi_err)
834 call mpi_comm_free(mc%full_comm, mpi_err)
835 call mpi_comm_free(mc%master_comm, mpi_err)
836
837 if (multicomm_have_slaves(mc)) call mpi_comm_free(mc%slave_intercomm, mpi_err)
838
839#endif
840 end if
841
842 safe_deallocate_a(mc%group_sizes)
843 safe_deallocate_a(mc%group_comm)
844 safe_deallocate_a(mc%who_am_i)
845
846 pop_sub(multicomm_end)
847 end subroutine multicomm_end
848
849
850 ! ---------------------------------------------------------
851 logical pure function multicomm_strategy_is_parallel(mc, level) result(rr)
852 type(multicomm_t), intent(in) :: mc
853 integer, intent(in) :: level
854
855 rr = bitand(mc%par_strategy, 2**(level - 1)) /= 0
856
857 end function multicomm_strategy_is_parallel
858
860 ! ---------------------------------------------------------
867#if defined(HAVE_MPI)
868
869 subroutine multicomm_create_all_pairs(mpi_grp, ap)
870 type(mpi_grp_t), intent(in) :: mpi_grp
871 type(multicomm_all_pairs_t), intent(out) :: ap
872
873 integer :: grp_size, rounds, ir, in
874
875 push_sub(create_all_pairs)
876
877 ap%grp = mpi_grp
878 grp_size = mpi_grp%size
879
880 ! Number of rounds.
881 if (mod(grp_size, 2) == 0) then
882 rounds = grp_size - 1
883 else
884 rounds = grp_size
885 end if
886 ap%rounds = rounds
887
888 ! Calculate schedule.
889 safe_allocate(ap%schedule(0:grp_size - 1, 1:rounds))
890 do ir = 1, rounds
891 do in = 0, grp_size - 1
892 ap%schedule(in, ir) = get_partner(in + 1, ir) - 1
893 end do
894 end do
895
896 pop_sub(create_all_pairs)
897
898 contains
899
900 ! ---------------------------------------------------------
902 integer pure function get_partner(in, ir)
903 integer, intent(in) :: in, ir
904
905 ! No PUSH SUB, called too often.
906
907 if (mod(grp_size, 2) == 0) then
908 get_partner = get_partner_even(grp_size, in - 1, ir - 1) + 1
909 else
910 get_partner = get_partner_odd(grp_size, in - 1, ir - 1) + 1
911 end if
912
913 end function get_partner
914
915 ! ---------------------------------------------------------
916 integer pure function get_partner_even(grp_size, ii, rr) result(pp)
917 integer, intent(in) :: grp_size, ii, rr
918
919 integer :: mm
920
921 ! No PUSH SUB, called too often.
922
923 mm = grp_size / 2
924
925 if (ii == 0) then
926 pp = rr + 1
927 elseif (ii == rr + 1) then
928 pp = 0
929 else
930 ! I never know when to use which remainder function, but here
931 ! it has to be the modulo one. Do not change that!
932 pp = modulo(2 * rr - ii + 1, 2 * mm - 1) + 1
933 end if
934
935 end function get_partner_even
936
937 ! ---------------------------------------------------------
938 integer pure function get_partner_odd(grp_size, ii, rr) result(pp)
939 integer, intent(in) :: grp_size, ii, rr
940
941 integer :: mm
942
943 ! No PUSH SUB, called too often.
945 mm = (grp_size + 1) / 2
946
947 pp = get_partner_even(grp_size + 1, ii, rr)
948
949 if (pp == 2 * mm - 1) then
950 pp = ii
951 end if
952
953 end function get_partner_odd
954
955 end subroutine multicomm_create_all_pairs
956#endif
957
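The schedule built by multicomm_create_all_pairs is the classic circle-method round robin: for an even group of size N every rank meets exactly one partner per round, and all pairs meet once within N-1 rounds. The standalone sketch below prints that schedule for a hypothetical group of 6 ranks, using the same pairing formula as get_partner_even (with 0-based ranks, as stored in ap%schedule).

program all_pairs_demo
  implicit none
  integer, parameter :: grp_size = 6
  integer :: schedule(0:grp_size - 1, 1:grp_size - 1)
  integer :: ir, in

  do ir = 1, grp_size - 1
    do in = 0, grp_size - 1
      schedule(in, ir) = partner_even(grp_size, in, ir - 1)
    end do
    print '(a,i0,a,6i3)', 'round ', ir, ':', schedule(:, ir)
  end do

contains

  ! same pairing rule as get_partner_even above (0-based indices)
  integer pure function partner_even(ngroup, ii, rr) result(pp)
    integer, intent(in) :: ngroup, ii, rr
    integer :: mm
    mm = ngroup / 2
    if (ii == 0) then
      pp = rr + 1
    else if (ii == rr + 1) then
      pp = 0
    else
      pp = modulo(2 * rr - ii + 1, 2 * mm - 1) + 1
    end if
  end function partner_even
end program all_pairs_demo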
958 !> Divide the range of numbers [1, nobjs] between nprocs processors.
961 subroutine multicomm_divide_range(nobjs, nprocs, istart, ifinal, lsize, scalapack_compat)
962 integer, intent(in) :: nobjs
963 integer, intent(in) :: nprocs
964 integer, intent(out) :: istart(:)
965 integer, intent(out) :: ifinal(:)
966 integer, optional, intent(out) :: lsize(:)
967 logical, optional, intent(in) :: scalapack_compat
968
969 integer :: ii, jj, rank
970 logical :: scalapack_compat_
971 integer :: nbl, size
972
973 scalapack_compat_ = optional_default(scalapack_compat, .false.)
974#ifndef HAVE_SCALAPACK
975 scalapack_compat_ = .false.
976#endif
978 if (scalapack_compat_) then
979 nbl = nobjs/nprocs
980 if (mod(nobjs, nprocs) /= 0) nbl = nbl + 1
981
982 istart(1) = 1
983 do rank = 1, nprocs
984#ifdef HAVE_SCALAPACK
985 size = numroc(nobjs, nbl, rank - 1, 0, nprocs)
986#endif
987 if (size > 0) then
988 if (rank > 1) istart(rank) = ifinal(rank - 1) + 1
989 ifinal(rank) = istart(rank) + size - 1
990 else
991 istart(rank) = 1
992 ifinal(rank) = 0
993 end if
994 end do
995 else
996
997 if (nprocs <= nobjs) then
998
999 ! procs are assigned to groups by round robin
1000 do rank = 0, nprocs - 1
1001 jj = nobjs / nprocs
1002 ii = nobjs - jj*nprocs
1003 if (ii > 0 .and. rank < ii) then
1004 jj = jj + 1
1005 istart(rank + 1) = rank*jj + 1
1006 ifinal(rank + 1) = istart(rank + 1) + jj - 1
1007 else
1008 ifinal(rank + 1) = nobjs - (nprocs - rank - 1)*jj
1009 istart(rank + 1) = ifinal(rank + 1) - jj + 1
1010 end if
1011 end do
1012
1013 else
1014 do ii = 1, nprocs
1015 if (ii <= nobjs) then
1016 istart(ii) = ii
1017 ifinal(ii) = ii
1018 else
1019 istart(ii) = 1
1020 ifinal(ii) = 0
1021 end if
1022 end do
1023 end if
1024 end if
1025
1026 if (present(lsize)) then
1027 lsize(1:nprocs) = ifinal(1:nprocs) - istart(1:nprocs) + 1
1028 assert(sum(lsize(1:nprocs)) == nobjs)
1029 end if
1030
1031 end subroutine multicomm_divide_range
1032
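In the non-ScaLAPACK branch, multicomm_divide_range hands out the nobjs objects in contiguous chunks, giving the first mod(nobjs, nprocs) ranks one object more than the rest. The sketch below reproduces that branch for a hypothetical case of 10 objects over 4 processes and prints the ranges 1-3, 4-6, 7-8 and 9-10; in real code one would of course call multicomm_divide_range itself.

program divide_range_demo
  implicit none
  integer, parameter :: nobjs = 10, nprocs = 4
  integer :: istart(nprocs), ifinal(nprocs), rank, ii, jj

  ! same round-robin split as the nprocs <= nobjs branch above
  do rank = 0, nprocs - 1
    jj = nobjs / nprocs
    ii = nobjs - jj * nprocs
    if (ii > 0 .and. rank < ii) then
      jj = jj + 1
      istart(rank + 1) = rank * jj + 1
      ifinal(rank + 1) = istart(rank + 1) + jj - 1
    else
      ifinal(rank + 1) = nobjs - (nprocs - rank - 1) * jj
      istart(rank + 1) = ifinal(rank + 1) - jj + 1
    end if
  end do
  do rank = 1, nprocs
    print '(a,i0,a,i0,a,i0)', 'rank ', rank - 1, ': objects ', istart(rank), ' to ', ifinal(rank)
  end do
end program divide_range_demo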
1033 ! ---------------------------------------------------------
1034 !> Function to divide the range of numbers from 1 to nobjs between all available threads with OpenMP.
1036 ! THREADSAFE
1037 subroutine multicomm_divide_range_omp(nobjs, ini, nobjs_loc)
1038 integer, intent(in) :: nobjs
1039 integer, intent(out) :: ini
1040 integer, intent(out) :: nobjs_loc
1041
1042 integer :: rank
1043#ifdef HAVE_OPENMP
1044 integer, allocatable :: istart(:), ifinal(:), lsize(:)
1045 integer :: nthreads
1046#endif
1047
1048 ! no push_sub, threadsafe
1049 rank = 1
1050#ifdef HAVE_OPENMP
1051 nthreads = omp_get_num_threads()
1052 allocate(istart(1:nthreads))
1053 allocate(ifinal(1:nthreads))
1054 allocate(lsize(1:nthreads))
1055 call multicomm_divide_range(nobjs, nthreads, istart, ifinal, lsize)
1056 rank = 1 + omp_get_thread_num()
1057 ini = istart(rank)
1058 nobjs_loc = lsize(rank)
1059 deallocate(istart)
1060 deallocate(ifinal)
1061 deallocate(lsize)
1062#else
1063 ini = 1
1064 nobjs_loc = nobjs
1065#endif
1066
1067 end subroutine multicomm_divide_range_omp
1068
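multicomm_divide_range_omp is meant to be called from inside an OpenMP parallel region, where each thread obtains its own contiguous chunk of the range. The fragment below is a hypothetical usage sketch, assuming the multicomm_oct_m module is available and the code is compiled with OpenMP; the subroutine name and the array operation are invented for illustration only.

subroutine scale_in_threads(nn, vec)
  use, intrinsic :: iso_fortran_env, only: real64
  use multicomm_oct_m, only: multicomm_divide_range_omp
  implicit none
  integer,      intent(in)    :: nn
  real(real64), intent(inout) :: vec(:)
  integer :: ini, nobjs_loc

  !$omp parallel private(ini, nobjs_loc)
  ! each thread handles the contiguous chunk [ini, ini + nobjs_loc - 1]
  call multicomm_divide_range_omp(nn, ini, nobjs_loc)
  vec(ini:ini + nobjs_loc - 1) = 2.0_real64 * vec(ini:ini + nobjs_loc - 1)
  !$omp end parallel
end subroutine scale_in_threads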
1069 ! ---------------------------------------------------------
1070
1071 logical pure function multicomm_is_slave(this) result(slave)
1072 type(multicomm_t), intent(in) :: this
1073
1074 slave = this%node_type == p_slave
1075 end function multicomm_is_slave
1076
1077 ! ---------------------------------------------------------
1078
1079 logical pure function multicomm_have_slaves(this) result(have_slaves)
1080 type(multicomm_t), intent(in) :: this
1081
1082 have_slaves = this%have_slaves
1083 end function multicomm_have_slaves
1084
1085end module multicomm_oct_m
1086
1087
1088!! Local Variables:
1089!! mode: f90
1090!! coding: utf-8
1091!! End: