Octopus
states_elec.F90
1!! Copyright (C) 2002-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
2!!
3!! This program is free software; you can redistribute it and/or modify
4!! it under the terms of the GNU General Public License as published by
5!! the Free Software Foundation; either version 2, or (at your option)
6!! any later version.
7!!
8!! This program is distributed in the hope that it will be useful,
9!! but WITHOUT ANY WARRANTY; without even the implied warranty of
10!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11!! GNU General Public License for more details.
12!!
13!! You should have received a copy of the GNU General Public License
14!! along with this program; if not, write to the Free Software
15!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16!! 02110-1301, USA.
17!!
18#include "global.h"
19
21 use accel_oct_m
22 use batch_oct_m
29 use comm_oct_m
30 use debug_oct_m
34 use global_oct_m
35 use grid_oct_m
36 use io_oct_m
37 use, intrinsic :: iso_fortran_env
40 use loct_oct_m
41 use math_oct_m
42 use mesh_oct_m
47 use mpi_oct_m
50#ifdef HAVE_OPENMP
51 use omp_lib
52#endif
53 use parser_oct_m
57 use smear_oct_m
58 use space_oct_m
62 use types_oct_m
63 use unit_oct_m
67
68 implicit none
69
70 private
71
72 public :: &
103 stress_t, &
105
106
107 ! This type should be moved to the stress module, but that is currently not possible due to a circular dependency.
108 type stress_t
109 real(real64) :: total(3,3) = m_zero
110 real(real64) :: kinetic(3,3) = m_zero
111 real(real64) :: Hartree(3,3) = m_zero
112 real(real64) :: xc(3,3) = m_zero
113 real(real64) :: ps_local(3,3) = m_zero
114 real(real64) :: ps_nl(3,3) = m_zero
115 real(real64) :: ion_ion(3,3) = m_zero
116 real(real64) :: vdw(3,3) = m_zero
117 real(real64) :: hubbard(3,3) = m_zero
118
119 real(real64) :: kinetic_sumrule
120 real(real64) :: hartree_sumrule
121 end type stress_t
122
123 ! TODO(Alex) Issue #672 Decouple k-point info from `states_elec_dim_t`
124
132 !
133 type, extends(states_abst_t) :: states_elec_t
134 ! Components are public by default
135 type(states_elec_dim_t) :: d
136 integer :: nst_conv
137
138 logical :: only_userdef_istates
139
140 type(states_elec_group_t) :: group
141 integer :: block_size
145 logical :: pack_states
149
150
151 character(len=1024), allocatable :: user_def_states(:,:,:)
154
155 ! TODO(Alex) Issue #821. Collate current quantities into an object.
156 ! the densities and currents (after all we are doing DFT :)
157 real(real64), allocatable :: rho(:,:)
158 real(real64), allocatable :: rho_core(:)
159
160 real(real64), allocatable :: current(:, :, :)
161 real(real64), allocatable :: current_para(:, :, :)
162 real(real64), allocatable :: current_dia(:, :, :)
163 real(real64), allocatable :: current_mag(:, :, :)
164 real(real64), allocatable :: current_kpt(:,:,:)
165
166 ! TODO(Alex) Issue #673. Create frozen density class and replace in states_elec_t
167 ! It may be required to "freeze" the deepest orbitals during the evolution; the density
168 ! of these orbitals is kept in frozen_rho. It is different from rho_core.
169 real(real64), allocatable :: frozen_rho(:, :)
170 real(real64), allocatable :: frozen_tau(:, :)
171 real(real64), allocatable :: frozen_gdens(:,:,:)
172 real(real64), allocatable :: frozen_ldens(:,:)
173
174 logical :: uniform_occ
175
176 real(real64), allocatable :: eigenval(:,:)
177 logical :: fixed_occ
178 logical :: restart_fixed_occ
179 logical :: restart_reorder_occs
180 real(real64), allocatable :: occ(:,:)
181 real(real64), allocatable :: kweights(:)
182 integer :: nik
183
184 logical, private :: fixed_spins
186 real(real64), allocatable :: spin(:, :, :)
187
188 real(real64) :: qtot
189 real(real64) :: val_charge
190
191 type(stress_t) :: stress_tensors
192
193 logical :: fromScratch
194 type(smear_t) :: smear
195
196 ! TODO(Alex) Issue #823 Move modelmbparticles out of states_elec_t
197 type(modelmb_particle_t) :: modelmbparticles
198 integer, allocatable :: mmb_nspindown(:,:)
199 integer, allocatable :: mmb_iyoung(:,:)
200 real(real64), allocatable :: mmb_proj(:)
202 logical :: parallel_in_states = .false.
204 ! TODO(Alex) Issue #824. Package the communicators in a single instance prior to removing
205 ! or consider creating a distributed_t instance for each (as distributed_t contains an instance of mpi_grp_t)
206 type(mpi_grp_t) :: mpi_grp
207 type(mpi_grp_t) :: dom_st_mpi_grp
208 type(mpi_grp_t) :: st_kpt_mpi_grp
209 type(mpi_grp_t) :: dom_st_kpt_mpi_grp
210 type(blacs_proc_grid_t) :: dom_st_proc_grid
211
212 type(distributed_t) :: dist
213 logical :: scalapack_compatible
214
215 ! TODO(Alex) Issue #820. Remove lnst, st_start, st_end and node, as they are all contained within dist
216 integer :: lnst
217 integer :: st_start, st_end
218 integer, allocatable :: node(:)
219
220 ! TODO(Alex) Issue #824. Either change from data to a method, or package with `st_kpt_mpi_grp`
221 integer, allocatable :: st_kpt_task(:,:)
222
223 type(multicomm_all_pairs_t), private :: ap
224 logical :: symmetrize_density
225 integer :: randomization
226 integer :: orth_method = 0
227
228 real(real64) :: cl_states_mem
230 contains
231 procedure :: nullify => states_elec_null
232 procedure :: write_info => states_elec_write_info
233 procedure :: pack => states_elec_pack
234 procedure :: unpack => states_elec_unpack
235 procedure :: set_zero => states_elec_set_zero
236 procedure :: dipole => states_elec_calculate_dipole
237 end type states_elec_t
240 integer, public, parameter :: &
241 par_independent = 1, &
242 par_dependent = 2
243
245 interface states_elec_get_state
248 end interface states_elec_get_state
249
258
261 end interface
263contains
265 ! TODO(Alex): Issue #826. Rename to something like "states_elec_default_wfs_type", or remove
266 subroutine states_elec_null(st)
267 class(states_elec_t), intent(inout) :: st
268
271 st%wfs_type = type_float ! By default, calculations use real wavefunctions
273 st%packed = .false.
276 end subroutine states_elec_null
278
280 subroutine states_elec_init(st, namespace, space, valence_charge, kpoints)
281 type(states_elec_t), target, intent(inout) :: st
282 type(namespace_t), intent(in) :: namespace
283 type(electron_space_t), intent(in) :: space
284 real(real64), intent(in) :: valence_charge
285 type(kpoints_t), intent(in) :: kpoints
287 real(real64) :: excess_charge, nempty_percent
288 integer :: nempty, ntot, default
289 integer :: nempty_conv
290 logical :: force
294 st%fromScratch = .true. ! this will be reset if restart_read is called
296
297 ! We get the spin dimension from the electronic space
298 ! TODO: Remove spin space information from states_elec_dim
299 st%d%ispin = space%ispin
301 ! Use of spinors requires complex wavefunctions.
302 if (st%d%ispin == spinors) call states_set_complex(st)
304 if (st%d%ispin /= unpolarized .and. kpoints%use_time_reversal) then
305 message(1) = "Time reversal symmetry is only implemented for unpolarized spins."
306 message(2) = "Use KPointsUseTimeReversal = no."
307 call messages_fatal(2, namespace=namespace)
308 end if
311 !%Variable ExcessCharge
312 !%Type float
313 !%Default 0.0
314 !%Section States
315 !%Description
316 !% The net charge of the system. A negative value means that we are adding
317 !% electrons, while a positive value means we are taking electrons
318 !% from the system.
319 !%End
320 call parse_variable(namespace, 'ExcessCharge', m_zero, excess_charge)
322 !%Variable TotalStates
323 !%Type integer
324 !%Default 0
325 !%Section States
326 !%Description
327 !% This variable sets the total number of states that Octopus will
328 !% use. This is normally not necessary since by default Octopus
329 !% sets the number of states to the minimum necessary to hold the
330 !% electrons present in the system. (This default behavior is
331 !% obtained by setting <tt>TotalStates</tt> to 0).
332 !%
333 !% If you want to add some unoccupied states, it is probably more convenient to use the variable
334 !% <tt>ExtraStates</tt>.
335 !%End
336 call parse_variable(namespace, 'TotalStates', 0, ntot)
337 if (ntot < 0) then
338 write(message(1), '(a,i5,a)') "Input: '", ntot, "' is not a valid value for TotalStates."
339 call messages_fatal(1, namespace=namespace)
340 end if
341
342 !%Variable ExtraStates
343 !%Type integer
344 !%Default 0
345 !%Section States
346 !%Description
347 !% The number of states is in principle calculated considering the minimum
348 !% numbers of states necessary to hold the electrons present in the system.
349 !% The number of electrons is
350 !% in turn calculated considering the nature of the species supplied in the
351 !% <tt>Species</tt> block, and the value of the <tt>ExcessCharge</tt> variable.
352 !% However, one may command <tt>Octopus</tt> to use more states, which is necessary if one wants to
353 !% use fractional occupation numbers, either fixed from the beginning through
354 !% the <tt>Occupations</tt> block or by prescribing
355 !% an electronic temperature with <tt>Smearing</tt>, or in order to calculate
356 !% excited states (including with <tt>CalculationMode = unocc</tt>).
357 !%End
358 call parse_variable(namespace, 'ExtraStates', 0, nempty)
359 if (nempty < 0) then
360 write(message(1), '(a,i5,a)') "Input: '", nempty, "' is not a valid value for ExtraStates."
361 message(2) = '(0 <= ExtraStates)'
362 call messages_fatal(2, namespace=namespace)
363 end if
364
365 if (ntot > 0 .and. nempty > 0) then
366 message(1) = 'You cannot set TotalStates and ExtraStates at the same time.'
367 call messages_fatal(1, namespace=namespace)
368 end if
369
370 !%Variable ExtraStatesInPercent
371 !%Type float
372 !%Default 0
373 !%Section States
374 !%Description
375 !% This variable allows one to set the number of extra/empty states as a percentage of the
376 !% number of occupied states. For example, a value of 35 for ExtraStatesInPercent would amount
377 !% to ceiling(35/100 * nstates) extra states, where nstates denotes the number of occupied
378 !% states Octopus is using for the system at hand.
379 !%End
380 call parse_variable(namespace, 'ExtraStatesInPercent', m_zero, nempty_percent)
381 if (nempty_percent < 0) then
382 write(message(1), '(a,f8.6,a)') "Input: '", nempty_percent, &
383 "' should be a percentage value x (where x is parts in hundred) larger or equal 0"
384 call messages_fatal(1, namespace=namespace)
385 end if
386
387 if (nempty > 0 .and. nempty_percent > 0) then
388 message(1) = 'You cannot set ExtraStates and ExtraStatesInPercent at the same time.'
389 call messages_fatal(1, namespace=namespace)
390 end if
391
392 !%Variable ExtraStatesToConverge
393 !%Type integer
394 !%Default 0
395 !%Section States
396 !%Description
397 !% Only for unocc calculations.
398 !% Specifies the number of extra states that will be considered in the convergence criteria.
399 !% Together with <tt>ExtraStates</tt>, one can thus add further states that are not
400 !% included in the convergence criteria, which speeds up the convergence of the
401 !% unocc calculation.
402 !% By default, all extra states need to be converged.
403 !%End
404 call parse_variable(namespace, 'ExtraStatesToConverge', nempty, nempty_conv)
405 if (nempty_conv < 0) then
406 write(message(1), '(a,i5,a)') "Input: '", nempty_conv, "' is not a valid value for ExtraStatesToConverge."
407 message(2) = '(0 <= ExtraStatesToConverge)'
408 call messages_fatal(2, namespace=namespace)
409 end if
410
411 if (nempty_conv > nempty) then
412 message(1) = 'You cannot set ExtraStatesToConverge to a higher value than ExtraStates.'
413 call messages_fatal(1, namespace=namespace)
414 end if
415
416 ! For non-periodic systems this should just return the Gamma point
417 call states_elec_choose_kpoints(st, kpoints, namespace)
418
419 st%val_charge = valence_charge
420
421 st%qtot = -(st%val_charge + excess_charge)
422
423 if (st%qtot < -m_epsilon) then
424 write(message(1),'(a,f12.6,a)') 'Total charge = ', st%qtot, ' < 0'
425 message(2) = 'Check Species and ExcessCharge.'
426 call messages_fatal(2, only_root_writes = .true., namespace=namespace)
427 end if
428
429 select case (st%d%ispin)
430 case (unpolarized)
431 st%d%dim = 1
432 st%nst = int(st%qtot/2)
433 if (st%nst*2 < st%qtot) st%nst = st%nst + 1
434 st%d%nspin = 1
435 st%d%spin_channels = 1
436 case (spin_polarized)
437 st%d%dim = 1
438 st%nst = int(st%qtot/2)
439 if (st%nst*2 < st%qtot) st%nst = st%nst + 1
440 st%d%nspin = 2
441 st%d%spin_channels = 2
442 case (spinors)
443 st%d%dim = 2
444 st%nst = int(st%qtot)
445 if (st%nst < st%qtot) st%nst = st%nst + 1
446 st%d%nspin = 4
447 st%d%spin_channels = 2
448 end select
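 ! Illustrative worked example (not part of the original file): for a neutral
 ! system with qtot = 7.0, the branches above give
 ! unpolarized / spin_polarized: nst = int(7/2) = 3, and since 3*2 < 7, nst = 4;
 ! spinors: nst = int(7) = 7 (each spinor state holds one electron).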
449
450 if (ntot > 0) then
451 if (ntot < st%nst) then
452 message(1) = 'TotalStates is smaller than the number of states required by the system.'
453 call messages_fatal(1, namespace=namespace)
454 end if
455
456 st%nst = ntot
457 end if
458
459 if (nempty_percent > 0) then
460 nempty = ceiling(nempty_percent * st%nst / 100)
461 end if
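 ! Illustrative example of the percentage-based padding above: with 10 occupied
 ! states and ExtraStatesInPercent = 35, nempty = ceiling(35.0 * 10 / 100) = 4.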
462
463 st%nst_conv = st%nst + nempty_conv
464 st%nst = st%nst + nempty
465 if (st%nst == 0) then
466 message(1) = "Cannot run with number of states = zero."
467 call messages_fatal(1, namespace=namespace)
468 end if
469
470 !%Variable StatesBlockSize
471 !%Type integer
472 !%Section Execution::Optimization
473 !%Description
474 !% Some routines work over blocks of eigenfunctions, which
475 !% generally improves performance at the expense of increased
476 !% memory consumption. This variable selects the size of the
477 !% blocks to be used. If GPUs are used, the default is the
478 !% warp size (32 for NVIDIA, 32 or 64 for AMD);
479 !% otherwise it is 4.
480 !%End
481
482 if (accel_is_enabled()) then
483 ! Some AMD GPUs have a warp size of 64. When OpenCL is used
484 ! accel%warp_size = 1 which is why we use max(accel%warp_size, 32)
485 ! here so that StatesBlockSize is at least 32.
486 default = max(accel%warp_size, 32)
487 else
488 default = 4
489 end if
490
491 if (default > pad_pow2(st%nst)) default = pad_pow2(st%nst)
492
493 assert(default > 0)
494
495 call parse_variable(namespace, 'StatesBlockSize', default, st%block_size)
496 if (st%block_size < 1) then
497 call messages_write("The variable 'StatesBlockSize' must be greater than 0.")
498 call messages_fatal(namespace=namespace)
499 end if
500
501 st%block_size = min(st%block_size, st%nst)
502 conf%target_states_block_size = st%block_size
503
504 safe_allocate(st%eigenval(1:st%nst, 1:st%nik))
505 st%eigenval = huge(st%eigenval)
506
507 ! Periodic systems require complex wavefunctions
508 ! but not if it is Gamma-point only
509 if (.not. kpoints%gamma_only()) then
510 call states_set_complex(st)
511 end if
512
513 !%Variable OnlyUserDefinedInitialStates
514 !%Type logical
515 !%Default no
516 !%Section States
517 !%Description
518 !% If true, then only user-defined states from the block <tt>UserDefinedStates</tt>
519 !% will be used as initial states for a time-propagation. No attempt is made
520 !% to load ground-state orbitals from a previous ground-state run.
521 !%End
522 call parse_variable(namespace, 'OnlyUserDefinedInitialStates', .false., st%only_userdef_istates)
523
524 ! we now allocate some arrays
525 safe_allocate(st%occ (1:st%nst, 1:st%nik))
526 st%occ = m_zero
527 ! allocate space for formula strings that define user-defined states
528 if (parse_is_defined(namespace, 'UserDefinedStates') .or. parse_is_defined(namespace, 'OCTInitialUserdefined') &
529 .or. parse_is_defined(namespace, 'OCTTargetUserdefined')) then
530 safe_allocate(st%user_def_states(1:st%d%dim, 1:st%nst, 1:st%nik))
531 ! initially we mark all 'formulas' as undefined
532 st%user_def_states(1:st%d%dim, 1:st%nst, 1:st%nik) = 'undefined'
533 end if
534
535 if (st%d%ispin == spinors) then
536 safe_allocate(st%spin(1:3, 1:st%nst, 1:st%nik))
537 end if
538
539 !%Variable StatesRandomization
540 !%Type integer
541 !%Default par_independent
542 !%Section States
543 !%Description
544 !% The randomization of states can be done in two ways:
545 !% i) a parallelisation-independent way (default), where the random states are identical
546 !% irrespective of the number of tasks, and
547 !% ii) a parallelisation-dependent way, which can prevent linear dependencies
548 !% from occurring for large systems.
549 !%Option par_independent 1
550 !% Parallelisation-independent randomization of states.
551 !%Option par_dependent 2
552 !% The randomization depends on the number of tasks used in the calculation.
553 !%End
554 call parse_variable(namespace, 'StatesRandomization', par_independent, st%randomization)
555
556
557 call states_elec_read_initial_occs(st, namespace, excess_charge, kpoints)
558 call states_elec_read_initial_spins(st, namespace)
559
560 ! This test can only be done here, as smear_init is called inside states_elec_read_initial_occs,
561 ! and smear%photodop is only set there.
562
563 if (st%smear%photodop) then
564 if (nempty == 0) then
565 write(message(1), '(a,i5,a)') "PhotoDoping requires ExtraStates to be specified."
566 message(2) = '(0 == ExtraStates)'
567 call messages_fatal(2, namespace=namespace)
568 end if
569 end if
570
571 st%st_start = 1
572 st%st_end = st%nst
573 st%lnst = st%nst
574 safe_allocate(st%node(1:st%nst))
575 st%node(1:st%nst) = 0
576
577 call mpi_grp_init(st%mpi_grp, mpi_comm_undefined)
578 st%parallel_in_states = .false.
579
580 call distributed_nullify(st%d%kpt, st%nik)
581
582 call modelmb_particles_init(st%modelmbparticles, namespace, space)
583 if (st%modelmbparticles%nparticle > 0) then
584 ! FIXME: check why this is not initialized properly in the test, or why it is written out when not initialized
585 safe_allocate(st%mmb_nspindown(1:st%modelmbparticles%ntype_of_particle, 1:st%nst))
586 st%mmb_nspindown(:,:) = -1
587 safe_allocate(st%mmb_iyoung(1:st%modelmbparticles%ntype_of_particle, 1:st%nst))
588 st%mmb_iyoung(:,:) = -1
589 safe_allocate(st%mmb_proj(1:st%nst))
590 st%mmb_proj(:) = m_zero
591 end if
592
593 !%Variable SymmetrizeDensity
594 !%Type logical
595 !%Default no
596 !%Section States
597 !%Description
598 !% When enabled, the density is symmetrized. Currently, this can
599 !% only be done for periodic systems. (Experimental.)
600 !%End
601 call parse_variable(namespace, 'SymmetrizeDensity', kpoints%use_symmetries, st%symmetrize_density)
602 call messages_print_var_value('SymmetrizeDensity', st%symmetrize_density, namespace=namespace)
603
604 !%Variable ForceComplex
605 !%Type logical
606 !%Default no
607 !%Section Execution::Debug
608 !%Description
609 !% Normally <tt>Octopus</tt> determines automatically the type necessary
610 !% for the wavefunctions. When set to yes, this variable will
611 !% force the use of complex wavefunctions.
612 !%
613 !% Warning: This variable is designed for testing and
614 !% benchmarking and normal users need not use it.
615 !%End
616 call parse_variable(namespace, 'ForceComplex', .false., force)
617
618 if (force) call states_set_complex(st)
619
620 st%packed = .false.
621
622 pop_sub(states_elec_init)
623 end subroutine states_elec_init
624
625 ! ---------------------------------------------------------
628 !
629 subroutine states_elec_look(restart, nik, dim, nst, ierr)
630 type(restart_t), intent(in) :: restart
631 integer, intent(out) :: nik
632 integer, intent(out) :: dim
633 integer, intent(out) :: nst
634 integer, intent(out) :: ierr
635
636 character(len=256) :: lines(3)
637 character(len=20) :: char
638 integer :: iunit
639
640 push_sub(states_elec_look)
641
642 ierr = 0
643
644 iunit = restart_open(restart, 'states')
645 call restart_read(restart, iunit, lines, 3, ierr)
646 if (ierr == 0) then
647 read(lines(1), *) char, nst
648 read(lines(2), *) char, dim
649 read(lines(3), *) char, nik
650 end if
651 call restart_close(restart, iunit)
652
653 pop_sub(states_elec_look)
654 end subroutine states_elec_look
655
656 ! ---------------------------------------------------------
665 !
666 subroutine states_elec_read_initial_occs(st, namespace, excess_charge, kpoints)
667 type(states_elec_t), intent(inout) :: st
668 type(namespace_t), intent(in) :: namespace
669 real(real64), intent(in) :: excess_charge
670 type(kpoints_t), intent(in) :: kpoints
671
672 integer :: ik, ist, ispin, nspin, ncols, nrows, el_per_state, icol, start_pos, spin_n
673 type(block_t) :: blk
674 real(real64) :: rr, charge
675 logical :: integral_occs, unoccupied_states
676 real(real64), allocatable :: read_occs(:, :)
677 real(real64) :: charge_in_block
678
680
681 !%Variable RestartFixedOccupations
682 !%Type logical
683 !%Default yes
684 !%Section States
685 !%Description
686 !% Setting this variable will make the restart proceed as
687 !% if the occupations from the previous calculation had been set via the <tt>Occupations</tt> block,
688 !% <i>i.e.</i> fixed. Otherwise, occupations will be determined by smearing.
689 !%End
690 call parse_variable(namespace, 'RestartFixedOccupations', .true., st%restart_fixed_occ)
691 ! we will turn on st%fixed_occ if restart_read is ever called
692
693 !%Variable Occupations
694 !%Type block
695 !%Section States
696 !%Description
697 !% The occupation numbers of the orbitals can be fixed through the use of this
698 !% variable. For example:
699 !%
700 !% <tt>%Occupations
701 !% <br>&nbsp;&nbsp;2 | 2 | 2 | 2 | 2
702 !% <br>%</tt>
703 !%
704 !% would fix the occupations of the five states to 2. There can be
705 !% at most as many columns as states in the calculation. If there are fewer columns
706 !% than states, then the code will assume that the user is indicating the occupations
707 !% of the uppermost states where all lower states have full occupation (i.e. 2 for spin-unpolarized
708 !% calculations, 1 otherwise) and all higher states have zero occupation. The first column
709 !% will be taken to refer to the lowest state such that the occupations would be consistent
710 !% with the correct total charge. For example, if there are 8 electrons and 10 states (from
711 !% <tt>ExtraStates = 6</tt>), then an abbreviated specification
712 !%
713 !% <tt>%Occupations
714 !% <br>&nbsp;&nbsp;1 | 0 | 1
715 !% <br>%</tt>
716 !%
717 !% would be equivalent to a full specification
718 !%
719 !% <tt>%Occupations
720 !% <br>&nbsp;&nbsp;2 | 2 | 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0
721 !% <br>%</tt>
722 !%
723 !% This is an example of use for constrained density-functional theory,
724 !% crudely emulating a HOMO->LUMO+1 optical excitation.
725 !% The number of rows should be equal
726 !% to the number of k-points times the number of spins. For example, for a finite system
727 !% with <tt>SpinComponents == spin_polarized</tt>,
728 !% this block should contain two lines, one for each spin channel.
729 !% All rows must have the same number of columns.
730 !%
731 !% The <tt>Occupations</tt> block is useful for the ground state of highly symmetric
732 !% small systems (like an open-shell atom), to fix the occupation numbers
733 !% of degenerate states in order to help <tt>octopus</tt> to converge. This is to
734 !% be used in conjunction with <tt>ExtraStates</tt>. For example, to calculate the
735 !% carbon atom, one would do:
736 !%
737 !% <tt>ExtraStates = 2
738 !% <br>%Occupations
739 !% <br>&nbsp;&nbsp;2 | 2/3 | 2/3 | 2/3
740 !% <br>%</tt>
741 !%
742 !% If you want the calculation to be spin-polarized (which makes more sense), you could do:
743 !%
744 !% <tt>ExtraStates = 2
745 !% <br>%Occupations
746 !% <br>&nbsp;&nbsp; 2/3 | 2/3 | 2/3
747 !% <br>&nbsp;&nbsp; 0 | 0 | 0
748 !% <br>%</tt>
749 !%
750 !% Note that in this case the first state is absent: the code will calculate four states
751 !% (two because there are four electrons, plus two because <tt>ExtraStates</tt> = 2), and since
752 !% it finds only three columns, it will occupy the first state with one electron for each
753 !% of the spin options.
754 !%
755 !% If the sum of occupations is not equal to the total charge set by <tt>ExcessCharge</tt>,
756 !% an error message is printed.
757 !% If <tt>FromScratch = no</tt> and <tt>RestartFixedOccupations = yes</tt>,
758 !% this block will be ignored.
759 !%End
760
761 integral_occs = .true.
762
763 occ_fix: if (parse_block(namespace, 'Occupations', blk) == 0) then
764 ! read in occupations
765 st%fixed_occ = .true.
766
767 ncols = parse_block_cols(blk, 0)
768 if (ncols > st%nst) then
769 call messages_input_error(namespace, "Occupations", "Too many columns in block Occupations.")
770 end if
771
772 nrows = parse_block_n(blk)
773 if (nrows /= st%nik) then
774 call messages_input_error(namespace, "Occupations", "Wrong number of rows in block Occupations.")
775 end if
776
777 do ik = 1, st%nik - 1
778 if (parse_block_cols(blk, ik) /= ncols) then
779 call messages_input_error(namespace, "Occupations", &
780 "All rows in block Occupations must have the same number of columns.")
781 end if
782 end do
783
784 ! Now we fill all the "missing" states with the maximum occupation.
785 if (st%d%ispin == unpolarized) then
786 el_per_state = 2
787 else
788 el_per_state = 1
789 end if
790
791 safe_allocate(read_occs(1:ncols, 1:st%nik))
792
793 charge_in_block = m_zero
794 do ik = 1, st%nik
795 do icol = 1, ncols
796 call parse_block_float(blk, ik - 1, icol - 1, read_occs(icol, ik))
797 charge_in_block = charge_in_block + read_occs(icol, ik) * st%kweights(ik)
798 end do
799 end do
800
801 spin_n = 2
802 select case (st%d%ispin)
803 case (unpolarized)
804 spin_n = 2
805 case (spin_polarized)
806 spin_n = 2
807 case (spinors)
808 spin_n = 1
809 end select
810
811 start_pos = nint((st%qtot - charge_in_block)/spin_n)
812
813 if (start_pos + ncols > st%nst) then
814 message(1) = "To balance charge, the first column in block Occupations is taken to refer to state"
815 write(message(2),'(a,i6,a)') "number ", start_pos, " but there are too many columns for the number of states."
816 write(message(3),'(a,i6,a)') "Solution: set ExtraStates = ", start_pos + ncols - st%nst
817 call messages_fatal(3, namespace=namespace)
818 end if
819
820 do ik = 1, st%nik
821 do ist = 1, start_pos
822 st%occ(ist, ik) = el_per_state
823 end do
824 end do
825
826 do ik = 1, st%nik
827 do ist = start_pos + 1, start_pos + ncols
828 st%occ(ist, ik) = read_occs(ist - start_pos, ik)
829 integral_occs = integral_occs .and. &
830 abs((st%occ(ist, ik) - el_per_state) * st%occ(ist, ik)) <= m_epsilon
831 end do
832 end do
833
834 do ik = 1, st%nik
835 do ist = start_pos + ncols + 1, st%nst
836 st%occ(ist, ik) = m_zero
837 end do
838 end do
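 ! Illustrative worked example of the filling above, matching the documentation of
 ! the Occupations block: 8 electrons, 10 states, unpolarized, and the block "1 | 0 | 1".
 ! Then el_per_state = 2, charge_in_block = 2, spin_n = 2, and
 ! start_pos = nint((8 - 2)/2) = 3, so states 1-3 receive occupation 2,
 ! states 4-6 receive 1, 0, 1, and states 7-10 receive 0.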
839
840 call parse_block_end(blk)
841
842 safe_deallocate_a(read_occs)
843
844 else
845 st%fixed_occ = .false.
846 integral_occs = .false.
847
848 ! first guess for occupation...paramagnetic configuration
849 rr = m_one
850 if (st%d%ispin == unpolarized) rr = m_two
851
852 st%occ = m_zero
853 st%qtot = -(st%val_charge + excess_charge)
854
855 nspin = 1
856 if (st%d%nspin == 2) nspin = 2
857
858 do ik = 1, st%nik, nspin
859 charge = m_zero
860 do ist = 1, st%nst
861 do ispin = ik, ik + nspin - 1
862 st%occ(ist, ispin) = min(rr, -(st%val_charge + excess_charge) - charge)
863 charge = charge + st%occ(ist, ispin)
864 end do
865 end do
866 end do
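 ! Illustrative example of the default paramagnetic filling above: for qtot = 8
 ! electrons in an unpolarized run (rr = 2), each k-point gets occ = 2, 2, 2, 2, 0, ...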
867
868 end if occ_fix
869
870 !%Variable RestartReorderOccs
871 !%Type logical
872 !%Default no
873 !%Section States
874 !%Description
875 !% Consider doing a ground-state calculation, and then restarting with new occupations set
876 !% with the <tt>Occupations</tt> block, in an attempt to populate the orbitals of the original
877 !% calculation. However, the eigenvalues may reorder as the density changes, in which case the
878 !% occupations will now be referring to different orbitals. Setting this variable to yes will
879 !% try to solve this issue when the restart data is being read, by reordering the occupations
880 !% according to the order of the expectation values of the restart wavefunctions.
881 !%End
882 if (st%fixed_occ) then
883 call parse_variable(namespace, 'RestartReorderOccs', .false., st%restart_reorder_occs)
884 else
885 st%restart_reorder_occs = .false.
886 end if
887
888 call smear_init(st%smear, namespace, st%d%ispin, st%fixed_occ, integral_occs, kpoints)
889
890 unoccupied_states = (st%d%ispin /= spinors .and. st%nst*2 > st%qtot) .or. (st%d%ispin == spinors .and. st%nst > st%qtot)
891
892 if (.not. smear_is_semiconducting(st%smear) .and. .not. st%smear%method == smear_fixed_occ) then
893 if (.not. unoccupied_states) then
894 call messages_write('Smearing needs unoccupied states (via ExtraStates or TotalStates) to be useful.')
895 call messages_warning(namespace=namespace)
896 end if
897 end if
898
899 ! sanity check
900 charge = m_zero
901 do ist = 1, st%nst
902 charge = charge + sum(st%occ(ist, 1:st%nik) * st%kweights(1:st%nik))
903 end do
904 if (abs(charge - st%qtot) > 1e-6_real64) then
905 message(1) = "Initial occupations do not integrate to total charge."
906 write(message(2), '(6x,f12.6,a,f12.6)') charge, ' != ', st%qtot
907 call messages_fatal(2, only_root_writes = .true., namespace=namespace)
908 end if
909
910 st%uniform_occ = smear_is_semiconducting(st%smear) .and. .not. unoccupied_states
911
913 end subroutine states_elec_read_initial_occs
914
915
916 ! ---------------------------------------------------------
923 !
924 subroutine states_elec_read_initial_spins(st, namespace)
925 type(states_elec_t), intent(inout) :: st
926 type(namespace_t), intent(in) :: namespace
927
928 integer :: i, j
929 type(block_t) :: blk
930
932
933 st%fixed_spins = .false.
934 if (st%d%ispin /= spinors) then
936 return
937 end if
938
939 !%Variable InitialSpins
940 !%Type block
941 !%Section States
942 !%Description
943 !% The spin character of the initial random guesses for the spinors can
944 !% be fixed by making use of this block. Note that this will not "fix" the
945 !% spins during the calculation (this cannot be done in spinors mode, since
946 !% being able to change the spins is why the spinors mode exists in the first
947 !% place).
948 !%
949 !% This block is meaningless and ignored if the run is not in spinors mode
950 !% (<tt>SpinComponents = spinors</tt>).
951 !%
952 !% The structure of the block is very simple: each column contains the desired
953 !% <math>\left< S_x \right>, \left< S_y \right>, \left< S_z \right> </math> for each spinor.
954 !% If the calculation is for a periodic system
955 !% and there is more than one <i>k</i>-point, the spins of all the <i>k</i>-points are
956 !% the same.
957 !%
958 !% For example, if we have two spinors, and we want one in the <math>S_x</math> "down" state,
959 !% and another one in the <math>S_x</math> "up" state:
960 !%
961 !% <tt>%InitialSpins
962 !% <br>&nbsp;&nbsp;&nbsp; 0.5 | 0.0 | 0.0
963 !% <br>&nbsp;&nbsp; -0.5 | 0.0 | 0.0
964 !% <br>%</tt>
965 !%
966 !% WARNING: if the calculation is for a system described by pseudopotentials (as
967 !% opposed to user-defined potentials or model systems), this option is
968 !% meaningless since the random spinors are overwritten by the atomic orbitals.
969 !%
970 !% This constraint must be fulfilled:
971 !% <br><math> \left< S_x \right>^2 + \left< S_y \right>^2 + \left< S_z \right>^2 = \frac{1}{4} </math>
972 !%End
973 spin_fix: if (parse_block(namespace, 'InitialSpins', blk) == 0) then
974 do i = 1, st%nst
975 do j = 1, 3
976 call parse_block_float(blk, i-1, j-1, st%spin(j, i, 1))
977 end do
978 if (abs(sum(st%spin(1:3, i, 1)**2) - m_fourth) > 1.0e-6_real64) call messages_input_error(namespace, 'InitialSpins')
979 end do
980 call parse_block_end(blk)
981 st%fixed_spins = .true.
982 do i = 2, st%nik
983 st%spin(:, :, i) = st%spin(:, :, 1)
984 end do
985 end if spin_fix
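 ! Illustrative example: a spinor fully polarized along +z would be specified in
 ! InitialSpins as 0.0 | 0.0 | 0.5, which fulfills the constraint checked above,
 ! <Sx>^2 + <Sy>^2 + <Sz>^2 = 0.25 (m_fourth).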
986
988 end subroutine states_elec_read_initial_spins
989
990
991 ! ---------------------------------------------------------
993 !
994 subroutine states_elec_allocate_wfns(st, mesh, wfs_type, skip, packed)
995 type(states_elec_t), intent(inout) :: st
996 class(mesh_t), intent(in) :: mesh
997 type(type_t), optional, intent(in) :: wfs_type
998 logical, optional, intent(in) :: skip(:)
999 logical, optional, intent(in) :: packed
1000
1002
1003 if (present(wfs_type)) then
1004 assert(wfs_type == type_float .or. wfs_type == type_cmplx)
1005 st%wfs_type = wfs_type
1006 end if
1007
1008 call states_elec_init_block(st, mesh, skip = skip, packed=packed)
1009 call states_elec_set_zero(st)
1010
1012 end subroutine states_elec_allocate_wfns
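 ! Minimal usage sketch (illustrative only; the names st and mesh are placeholders,
 ! actual callers live elsewhere in the code):
 !
 ! call states_elec_allocate_wfns(st, mesh, wfs_type = type_cmplx, packed = .true.)
 !
 ! which allocates wave-function batches for all states and k-points of st on the
 ! given mesh and zeroes them via states_elec_set_zero.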
1013
1014 !---------------------------------------------------------------------
1031 subroutine states_elec_init_block(st, mesh, verbose, skip, packed)
1032 type(states_elec_t), intent(inout) :: st
1033 type(mesh_t), intent(in) :: mesh
1034 logical, optional, intent(in) :: verbose
1035 logical, optional, intent(in) :: skip(:)
1036 logical, optional, intent(in) :: packed
1037
1038 integer :: ib, iqn, ist, istmin, istmax
1039 logical :: same_node, verbose_, packed_
1040 integer, allocatable :: bstart(:), bend(:)
1041
1042 push_sub(states_elec_init_block)
1043
1044 safe_allocate(bstart(1:st%nst))
1045 safe_allocate(bend(1:st%nst))
1046 safe_allocate(st%group%iblock(1:st%nst))
1047
1048 st%group%iblock = 0
1049
1050 verbose_ = optional_default(verbose, .true.)
1051 packed_ = optional_default(packed, .false.)
1052
1053 ! In case we have a list of states to skip, we do not allocate them
1054 istmin = 1
1055 if (present(skip)) then
1056 do ist = 1, st%nst
1057 if (.not. skip(ist)) then
1058 istmin = ist
1059 exit
1060 end if
1061 end do
1062 end if
1063
1064 istmax = st%nst
1065 if (present(skip)) then
1066 do ist = st%nst, istmin, -1
1067 if (.not. skip(ist)) then
1068 istmax = ist
1069 exit
1070 end if
1071 end do
1072 end if
1073
1074 if (present(skip) .and. verbose_) then
1075 call messages_write('Info: Allocating states from ')
1076 call messages_write(istmin, fmt = 'i8')
1077 call messages_write(' to ')
1078 call messages_write(istmax, fmt = 'i8')
1079 call messages_info()
1080 end if
1081
1082 ! count and assign blocks
1083 ib = 0
1084 st%group%nblocks = 0
1085 bstart(1) = istmin
1086 do ist = istmin, istmax
1087 ib = ib + 1
1088
1089 st%group%iblock(ist) = st%group%nblocks + 1
1090
1091 same_node = .true.
1092 if (st%parallel_in_states .and. ist /= istmax) then
1093 ! We have to avoid states that are on different nodes ending
1094 ! up in the same block
1095 same_node = (st%node(ist + 1) == st%node(ist))
1096 end if
1097
1098 if (ib == st%block_size .or. ist == istmax .or. .not. same_node) then
1099 ib = 0
1100 st%group%nblocks = st%group%nblocks + 1
1101 bend(st%group%nblocks) = ist
1102 if (ist /= istmax) bstart(st%group%nblocks + 1) = ist + 1
1103 end if
1104 end do
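 ! Illustrative example of the partition built above (assuming no state
 ! parallelization): nst = 10 with block_size = 4 yields nblocks = 3, with
 ! block ranges 1-4, 5-8 and 9-10.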
1105
1106 safe_allocate(st%group%psib(1:st%group%nblocks, st%d%kpt%start:st%d%kpt%end))
1107
1108 safe_allocate(st%group%block_is_local(1:st%group%nblocks, 1:st%nik))
1109 st%group%block_is_local = .false.
1110 st%group%block_start = -1
1111 st%group%block_end = -2 ! this ensures that loops over block_start:block_end do not run if not initialized
1112
1113 do ib = 1, st%group%nblocks
1114 if (bstart(ib) >= st%st_start .and. bend(ib) <= st%st_end) then
1115 if (st%group%block_start == -1) st%group%block_start = ib
1116 st%group%block_end = ib
1117 do iqn = st%d%kpt%start, st%d%kpt%end
1118 st%group%block_is_local(ib, iqn) = .true.
1119
1120 if (states_are_real(st)) then
1121 call dwfs_elec_init(st%group%psib(ib, iqn), st%d%dim, bstart(ib), bend(ib), mesh%np_part, iqn, &
1122 special=.true., packed=packed_)
1123 else
1124 call zwfs_elec_init(st%group%psib(ib, iqn), st%d%dim, bstart(ib), bend(ib), mesh%np_part, iqn, &
1125 special=.true., packed=packed_)
1126 end if
1127
1128 end do
1129 end if
1130 end do
1131
1132 safe_allocate(st%group%block_range(1:st%group%nblocks, 1:2))
1133 safe_allocate(st%group%block_size(1:st%group%nblocks))
1134
1135 st%group%block_range(1:st%group%nblocks, 1) = bstart(1:st%group%nblocks)
1136 st%group%block_range(1:st%group%nblocks, 2) = bend(1:st%group%nblocks)
1137 st%group%block_size(1:st%group%nblocks) = bend(1:st%group%nblocks) - bstart(1:st%group%nblocks) + 1
1138
1139 st%group%block_initialized = .true.
1140
1141 safe_allocate(st%group%block_node(1:st%group%nblocks))
1142
1143 assert(allocated(st%node))
1144 assert(all(st%node >= 0) .and. all(st%node < st%mpi_grp%size))
1145
1146 do ib = 1, st%group%nblocks
1147 st%group%block_node(ib) = st%node(st%group%block_range(ib, 1))
1148 assert(st%group%block_node(ib) == st%node(st%group%block_range(ib, 2)))
1149 end do
1150
1151 if (verbose_) then
1152 call messages_write('Info: Blocks of states')
1153 call messages_info()
1154 do ib = 1, st%group%nblocks
1155 call messages_write(' Block ')
1156 call messages_write(ib, fmt = 'i8')
1157 call messages_write(' contains ')
1158 call messages_write(st%group%block_size(ib), fmt = 'i8')
1159 call messages_write(' states')
1160 if (st%group%block_size(ib) > 0) then
1161 call messages_write(':')
1162 call messages_write(st%group%block_range(ib, 1), fmt = 'i8')
1163 call messages_write(' - ')
1164 call messages_write(st%group%block_range(ib, 2), fmt = 'i8')
1165 end if
1166 call messages_info()
1167 end do
1168 end if
1169
1170!!$!!!!DEBUG
1171!!$ ! some debug output that I will keep here for the moment
1172!!$ if (mpi_grp_is_root(mpi_world)) then
1173!!$ print*, "NST ", st%nst
1174!!$ print*, "BLOCKSIZE ", st%block_size
1175!!$ print*, "NBLOCKS ", st%group%nblocks
1176!!$
1177!!$ print*, "==============="
1178!!$ do ist = 1, st%nst
1179!!$ print*, st%node(ist), ist, st%group%iblock(ist)
1180!!$ end do
1181!!$ print*, "==============="
1182!!$
1183!!$ do ib = 1, st%group%nblocks
1184!!$ print*, ib, bstart(ib), bend(ib)
1185!!$ end do
1186!!$
1187!!$ end if
1188!!$!!!!ENDOFDEBUG
1189
1190 safe_deallocate_a(bstart)
1191 safe_deallocate_a(bend)
1192 pop_sub(states_elec_init_block)
1193 end subroutine states_elec_init_block
1194
1195
1196 ! ---------------------------------------------------------
1198 subroutine states_elec_deallocate_wfns(st)
1199 type(states_elec_t), intent(inout) :: st
1200
1202
1203 call states_elec_group_end(st%group, st%d)
1204
1206 end subroutine states_elec_deallocate_wfns
1207
1208
1209 ! ---------------------------------------------------------
1210 subroutine states_elec_densities_init(st, gr)
1211 type(states_elec_t), target, intent(inout) :: st
1212 type(grid_t), intent(in) :: gr
1213
1214 real(real64) :: fsize
1215
1217
1218 safe_allocate(st%rho(1:gr%np_part, 1:st%d%nspin))
1219 st%rho = m_zero
1220
1221 fsize = gr%np_part*8.0_real64*st%block_size
1222
1223 call messages_write('Info: states-block size = ')
1224 call messages_write(fsize, fmt = '(f10.1)', align_left = .true., units = unit_megabytes, print_units = .true.)
1225 call messages_info()
1226
1228 end subroutine states_elec_densities_init
1229
1230 !---------------------------------------------------------------------
1231 subroutine states_elec_allocate_current(st, space, mesh)
1232 type(states_elec_t), intent(inout) :: st
1233 class(space_t), intent(in) :: space
1234 class(mesh_t), intent(in) :: mesh
1235
1237
1238 if (.not. allocated(st%current)) then
1239 safe_allocate(st%current(1:mesh%np_part, 1:space%dim, 1:st%d%nspin))
1240 st%current = m_zero
1241 end if
1242
1243 if (.not. allocated(st%current_para)) then
1244 safe_allocate(st%current_para(1:mesh%np_part, 1:space%dim, 1:st%d%nspin))
1245 st%current_para = m_zero
1246 end if
1247
1248 if (.not. allocated(st%current_dia)) then
1249 safe_allocate(st%current_dia(1:mesh%np_part, 1:space%dim, 1:st%d%nspin))
1250 st%current_dia= m_zero
1251 end if
1252
1253 if (.not. allocated(st%current_mag)) then
1254 safe_allocate(st%current_mag(1:mesh%np_part, 1:space%dim, 1:st%d%nspin))
1255 st%current_mag= m_zero
1256 end if
1257
1258 if (.not. allocated(st%current_kpt)) then
1259 safe_allocate(st%current_kpt(1:mesh%np,1:space%dim,st%d%kpt%start:st%d%kpt%end))
1260 st%current_kpt = m_zero
1261 end if
1262
1264 end subroutine states_elec_allocate_current
1265
1266 !---------------------------------------------------------------------
1274 subroutine states_elec_exec_init(st, namespace, mc)
1275 type(states_elec_t), intent(inout) :: st
1276 type(namespace_t), intent(in) :: namespace
1277 type(multicomm_t), intent(in) :: mc
1278
1279 integer :: default
1280
1281 push_sub(states_elec_exec_init)
1282
1283 !%Variable StatesPack
1284 !%Type logical
1285 !%Section Execution::Optimization
1286 !%Description
1287 !% When set to yes, states are stored in packed mode, which improves
1288 !% performance considerably. Not all parts of the code will profit from
1289 !% this, but should nevertheless work regardless of how the states are
1290 !% stored.
1292 !% If GPUs are used and this variable is set to yes, Octopus
1293 !% will store the wave-functions in device (GPU) memory. If
1294 !% there is not enough memory to store all the wave-functions,
1295 !% execution will stop with an error.
1296 !%
1297 !% See also the related <tt>HamiltonianApplyPacked</tt> variable.
1298 !%
1299 !% The default is yes.
1300 !%End
1301
1302 call parse_variable(namespace, 'StatesPack', .true., st%pack_states)
1304 call messages_print_var_value('StatesPack', st%pack_states, namespace=namespace)
1305
1306 call messages_obsolete_variable(namespace, 'StatesMirror')
1307
1308 !%Variable StatesOrthogonalization
1309 !%Type integer
1310 !%Section SCF::Eigensolver
1311 !%Description
1312 !% The full orthogonalization method used by some
1313 !% eigensolvers. The default is <tt>cholesky_serial</tt>, except with state
1314 !% parallelization, where the default is <tt>cholesky_parallel</tt>.
1315 !%Option cholesky_serial 1
1316 !% Cholesky decomposition implemented using
1317 !% BLAS/LAPACK. Can be used with domain parallelization but not
1318 !% state parallelization.
1319 !%Option cholesky_parallel 2
1320 !% Cholesky decomposition implemented using
1321 !% ScaLAPACK. Compatible with states parallelization.
1322 !%Option cgs 3
1323 !% Classical Gram-Schmidt (CGS) orthogonalization.
1324 !% Can be used with domain parallelization but not state parallelization.
1325 !% The algorithm is defined in Giraud et al., Computers and Mathematics with Applications 50, 1069 (2005).
1326 !%Option mgs 4
1327 !% Modified Gram-Schmidt (MGS) orthogonalization.
1328 !% Can be used with domain parallelization but not state parallelization.
1329 !% The algorithm is defined in Giraud et al., Computers and Mathematics with Applications 50, 1069 (2005).
1330 !%Option drcgs 5
1331 !% Classical Gram-Schmidt orthogonalization with double-step reorthogonalization.
1332 !% Can be used with domain parallelization but not state parallelization.
1333 !% The algorithm is taken from Giraud et al., Computers and Mathematics with Applications 50, 1069 (2005).
1334 !% According to this reference, this is much more precise than the CGS or MGS algorithms. The MGS version does not seem to improve the stability much and would require more communication over the domains.
1335 !%End
1336
1337 default = option__statesorthogonalization__cholesky_serial
1337#ifdef HAVE_SCALAPACK
 if (st%parallel_in_states) then
1340 default = option__statesorthogonalization__cholesky_parallel
1341 end if
1342#endif
1343
1344 call parse_variable(namespace, 'StatesOrthogonalization', default, st%orth_method)
1345
1346 if (.not. varinfo_valid_option('StatesOrthogonalization', st%orth_method)) then
1347 call messages_input_error(namespace, 'StatesOrthogonalization')
1348 end if
1349 call messages_print_var_option('StatesOrthogonalization', st%orth_method, namespace=namespace)
1350
1351 !%Variable StatesCLDeviceMemory
1352 !%Type float
1353 !%Section Execution::Optimization
1354 !%Default -512
1355 !%Description
1356 !% This variable selects the amount of OpenCL device memory that
1357 !% will be used by Octopus to store the states.
1358 !%
1359 !% A positive number smaller than 1 indicates a fraction of the total
1360 !% device memory. A number larger than one indicates an absolute
1361 !% amount of memory in megabytes. A negative number indicates an
1362 !% amount of memory in megabytes that would be subtracted from
1363 !% the total device memory.
1364 !%End
1365 call parse_variable(namespace, 'StatesCLDeviceMemory', -512.0_real64, st%cl_states_mem)
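 ! Illustrative reading of the values documented above: the default of -512 means
 ! "total device memory minus 512 megabytes"; 0.5 would mean half of the device
 ! memory; 2048 would mean an absolute 2048 megabytes.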
1366
1368 end subroutine states_elec_exec_init
1369
1370
1371 ! ---------------------------------------------------------
1373 !
1374 subroutine states_elec_copy(stout, stin, exclude_wfns, exclude_eigenval, special)
1375 type(states_elec_t), target, intent(inout) :: stout
1376 type(states_elec_t), intent(in) :: stin
1377 logical, optional, intent(in) :: exclude_wfns
1378 logical, optional, intent(in) :: exclude_eigenval
1379 logical, optional, intent(in) :: special
1380
1381 logical :: exclude_wfns_
1382
1383 push_sub(states_elec_copy)
1384
1385 exclude_wfns_ = optional_default(exclude_wfns, .false.)
1386
1387 call states_elec_null(stout)
1388
1389 call states_elec_dim_copy(stout%d, stin%d)
1390 safe_allocate_source_a(stout%kweights, stin%kweights)
1391 stout%nik = stin%nik
1392
1393 call modelmb_particles_copy(stout%modelmbparticles, stin%modelmbparticles)
1394 if (stin%modelmbparticles%nparticle > 0) then
1395 safe_allocate_source_a(stout%mmb_nspindown, stin%mmb_nspindown)
1396 safe_allocate_source_a(stout%mmb_iyoung, stin%mmb_iyoung)
1397 safe_allocate_source_a(stout%mmb_proj, stin%mmb_proj)
1398 end if
1399
1400 stout%wfs_type = stin%wfs_type
1401 stout%nst = stin%nst
1402 stout%block_size = stin%block_size
1403 stout%orth_method = stin%orth_method
1404
1405 stout%cl_states_mem = stin%cl_states_mem
1406 stout%pack_states = stin%pack_states
1407
1408
1409 stout%only_userdef_istates = stin%only_userdef_istates
1410
1411 if (.not. exclude_wfns_) then
1412 safe_allocate_source_a(stout%rho, stin%rho)
1413 end if
1414
1415 stout%uniform_occ = stin%uniform_occ
1416
1417 if (.not. optional_default(exclude_eigenval, .false.)) then
1418 safe_allocate_source_a(stout%eigenval, stin%eigenval)
1419 safe_allocate_source_a(stout%occ, stin%occ)
1420 safe_allocate_source_a(stout%spin, stin%spin)
1421 end if
1422
1423 ! the call to init_block is done at the end of this subroutine
1424 ! it allocates iblock, psib, block_is_local
1425 stout%group%nblocks = stin%group%nblocks
1426
1427 safe_allocate_source_a(stout%user_def_states, stin%user_def_states)
1428
1429 safe_allocate_source_a(stout%current, stin%current)
1430 safe_allocate_source_a(stout%current_kpt, stin%current_kpt)
1431 safe_allocate_source_a(stout%rho_core, stin%rho_core)
1432 safe_allocate_source_a(stout%frozen_rho, stin%frozen_rho)
1433 safe_allocate_source_a(stout%frozen_tau, stin%frozen_tau)
1434 safe_allocate_source_a(stout%frozen_gdens, stin%frozen_gdens)
1435 safe_allocate_source_a(stout%frozen_ldens, stin%frozen_ldens)
1436
1437 stout%fixed_occ = stin%fixed_occ
1438 stout%restart_fixed_occ = stin%restart_fixed_occ
1439
1440 stout%fixed_spins = stin%fixed_spins
1441
1442 stout%qtot = stin%qtot
1443 stout%val_charge = stin%val_charge
1444
1445 call smear_copy(stout%smear, stin%smear)
1446
1447 stout%parallel_in_states = stin%parallel_in_states
1448 call mpi_grp_copy(stout%mpi_grp, stin%mpi_grp)
1449 call mpi_grp_copy(stout%dom_st_kpt_mpi_grp, stin%dom_st_kpt_mpi_grp)
1450 call mpi_grp_copy(stout%st_kpt_mpi_grp, stin%st_kpt_mpi_grp)
1451 call mpi_grp_copy(stout%dom_st_mpi_grp, stin%dom_st_mpi_grp)
1452 safe_allocate_source_a(stout%node, stin%node)
1453 safe_allocate_source_a(stout%st_kpt_task, stin%st_kpt_task)
1454
1455#ifdef HAVE_SCALAPACK
1456 call blacs_proc_grid_copy(stin%dom_st_proc_grid, stout%dom_st_proc_grid)
1457#endif
1458
1459 call distributed_copy(stin%dist, stout%dist)
1460
1461 stout%scalapack_compatible = stin%scalapack_compatible
1462
1463 stout%lnst = stin%lnst
1464 stout%st_start = stin%st_start
1465 stout%st_end = stin%st_end
1466
1467 if (stin%parallel_in_states) call multicomm_all_pairs_copy(stout%ap, stin%ap)
1468
1469 stout%symmetrize_density = stin%symmetrize_density
1470
1471 if (.not. exclude_wfns_) call states_elec_group_copy(stin%d, stin%group, stout%group, special=special)
1472
1473 stout%packed = stin%packed
1474
1475 stout%randomization = stin%randomization
1476
1477 pop_sub(states_elec_copy)
1478 end subroutine states_elec_copy
1479
1480
1481 ! ---------------------------------------------------------
1483 !
1484 subroutine states_elec_end(st)
1485 type(states_elec_t), intent(inout) :: st
1486
1487 push_sub(states_elec_end)
1488
1489 call states_elec_dim_end(st%d)
1490
1491 if (st%modelmbparticles%nparticle > 0) then
1492 safe_deallocate_a(st%mmb_nspindown)
1493 safe_deallocate_a(st%mmb_iyoung)
1494 safe_deallocate_a(st%mmb_proj)
1495 end if
1496 call modelmb_particles_end(st%modelmbparticles)
1497
1498 ! this deallocates dpsi, zpsi, psib, iblock, iblock
1500
1501 safe_deallocate_a(st%user_def_states)
1502
1503 safe_deallocate_a(st%rho)
1504 safe_deallocate_a(st%eigenval)
1505
1506 safe_deallocate_a(st%current)
1507 safe_deallocate_a(st%current_para)
1508 safe_deallocate_a(st%current_dia)
1509 safe_deallocate_a(st%current_mag)
1510 safe_deallocate_a(st%current_kpt)
1511 safe_deallocate_a(st%rho_core)
1512 safe_deallocate_a(st%frozen_rho)
1513 safe_deallocate_a(st%frozen_tau)
1514 safe_deallocate_a(st%frozen_gdens)
1515 safe_deallocate_a(st%frozen_ldens)
1516 safe_deallocate_a(st%occ)
1517 safe_deallocate_a(st%spin)
1518 safe_deallocate_a(st%kweights)
1519
1520
1521#ifdef HAVE_SCALAPACK
1522 call blacs_proc_grid_end(st%dom_st_proc_grid)
1523#endif
1524
1525 call distributed_end(st%dist)
1526
1527 safe_deallocate_a(st%node)
1528 safe_deallocate_a(st%st_kpt_task)
1529
1530 if (st%parallel_in_states) then
1531 safe_deallocate_a(st%ap%schedule)
1532 end if
1533
1534 pop_sub(states_elec_end)
1535 end subroutine states_elec_end
1536
1537
1538 ! TODO(Alex) Issue #684. Abstract duplicate code in states_elec_generate_random, to get it to
1539 ! a point where it can be refactored.
1541 subroutine states_elec_generate_random(st, mesh, kpoints, ist_start_, ist_end_, ikpt_start_, ikpt_end_, normalized)
1542 type(states_elec_t), intent(inout) :: st
1543 class(mesh_t), intent(in) :: mesh
1544 type(kpoints_t), intent(in) :: kpoints
1545 integer, optional, intent(in) :: ist_start_
1546 integer, optional, intent(in) :: ist_end_
1547 integer, optional, intent(in) :: ikpt_start_
1548 integer, optional, intent(in) :: ikpt_end_
1549 logical, optional, intent(in) :: normalized
1551
1552 integer :: ist, ik, id, ist_start, ist_end, jst, ikpt_start, ikpt_end
1553 complex(real64) :: alpha, beta
1554 real(real64), allocatable :: dpsi(:, :)
1555 complex(real64), allocatable :: zpsi(:, :), zpsi2(:)
1556 integer :: ikpoint, ip
1557 type(batch_t) :: ffb
1558
1559 logical :: normalized_
1560
1561 normalized_ = optional_default(normalized, .true.)
1562
1564
1565 ist_start = optional_default(ist_start_, 1)
1566 ist_end = optional_default(ist_end_, st%nst)
1567 ikpt_start = optional_default(ikpt_start_, 1)
1568 ikpt_end = optional_default(ikpt_end_, st%nik)
1569
1570 safe_allocate(dpsi(1:mesh%np, 1:st%d%dim))
1571 if (states_are_complex(st)) then
1572 safe_allocate(zpsi(1:mesh%np, 1:st%d%dim))
1573 end if
1574
1575 select case (st%d%ispin)
1578 do ik = ikpt_start, ikpt_end
1579 ikpoint = st%d%get_kpoint_index(ik)
1580 do ist = ist_start, ist_end
1581 if (states_are_real(st).or.kpoints_point_is_gamma(kpoints, ikpoint)) then
1582 if (st%randomization == par_independent) then
1583 call dmf_random(mesh, dpsi(:, 1), &
1584 pre_shift = mesh%pv%xlocal-1, &
1585 post_shift = mesh%pv%np_global - mesh%pv%xlocal - mesh%np + 1, &
1586 normalized = normalized)
1587 ! Ensures that the grid points are properly distributed in the domain parallel case
1588 if(mesh%parallel_in_domains) then
1589 call batch_init(ffb, dpsi(:,1))
1590 call dmesh_batch_exchange_points(mesh, ffb, backward_map = .true.)
1591 call ffb%end()
1592 end if
1593 else
1594 call dmf_random(mesh, dpsi(:, 1), normalized = normalized)
1595 end if
1596 if (.not. state_kpt_is_local(st, ist, ik)) cycle
1597 if (states_are_complex(st)) then !Gamma point
1598 do ip = 1, mesh%np
1599 zpsi(ip,1) = cmplx(dpsi(ip,1), m_zero, real64)
1600 end do
1601 call states_elec_set_state(st, mesh, ist, ik, zpsi)
1602 else
1603 call states_elec_set_state(st, mesh, ist, ik, dpsi)
1604 end if
1605 else
1606 if (st%randomization == par_independent) then
1607 call zmf_random(mesh, zpsi(:, 1), &
1608 pre_shift = mesh%pv%xlocal-1, &
1609 post_shift = mesh%pv%np_global - mesh%pv%xlocal - mesh%np + 1, &
1610 normalized = normalized)
1611 ! Ensures that the grid points are properly distributed in the domain parallel case
1612 if(mesh%parallel_in_domains) then
1613 call batch_init(ffb, zpsi(:,1))
1614 call zmesh_batch_exchange_points(mesh, ffb, backward_map = .true.)
1615 call ffb%end()
1616 end if
1617 else
1618 call zmf_random(mesh, zpsi(:, 1), normalized = normalized)
1619 end if
1620 if (.not. state_kpt_is_local(st, ist, ik)) cycle
1621 call states_elec_set_state(st, mesh, ist, ik, zpsi)
1622 end if
1623 end do
1624 end do
1625
1626 case (spinors)
1627
1628 assert(states_are_complex(st))
1629
1630 if (st%fixed_spins) then
1631
1632 do ik = ikpt_start, ikpt_end
1633 ikpoint = st%d%get_kpoint_index(ik)
1634 do ist = ist_start, ist_end
1635 if (kpoints_point_is_gamma(kpoints, ikpoint)) then
1636 if (st%randomization == par_independent) then
1637 call dmf_random(mesh, dpsi(:, 1), &
1638 pre_shift = mesh%pv%xlocal-1, &
1639 post_shift = mesh%pv%np_global - mesh%pv%xlocal - mesh%np + 1, &
1640 normalized = normalized)
1641 ! Ensures that the grid points are properly distributed in the domain parallel case
1642 if(mesh%parallel_in_domains) then
1643 call batch_init(ffb, dpsi(:,1))
1644 call dmesh_batch_exchange_points(mesh, ffb, backward_map = .true.)
1645 call ffb%end()
1646 end if
1647 else
1648 call dmf_random(mesh, dpsi(:, 1), normalized = normalized)
1649 if (.not. state_kpt_is_local(st, ist, ik)) cycle
1650 end if
1651 do ip = 1, mesh%np
1652 zpsi(ip,1) = cmplx(dpsi(ip,1), m_zero, real64)
1653 end do
1654 call states_elec_set_state(st, mesh, ist, ik, zpsi)
1655 else
1656 if (st%randomization == par_independent) then
1657 call zmf_random(mesh, zpsi(:, 1), &
1658 pre_shift = mesh%pv%xlocal-1, &
1659 post_shift = mesh%pv%np_global - mesh%pv%xlocal - mesh%np + 1, &
1660 normalized = normalized)
1661 ! Ensures that the grid points are properly distributed in the domain parallel case
1662 if(mesh%parallel_in_domains) then
1663 call batch_init(ffb, zpsi(:,1))
1664 call zmesh_batch_exchange_points(mesh, ffb, backward_map = .true.)
1665 call ffb%end()
1666 end if
1667 else
1668 call zmf_random(mesh, zpsi(:, 1), normalized = normalized)
1669 if (.not. state_kpt_is_local(st, ist, ik)) cycle
1670 end if
1671 end if
1672 if (.not. state_kpt_is_local(st, ist, ik)) cycle
1673 ! In this case, the spinors are made of a spatial part times a vector [alpha beta]^T in
1674 ! spin space (i.e., same spatial part for each spin component). So (alpha, beta)
1675 ! determines the spin values. The values of (alpha, beta) can be obtained
1676 ! with simple formulae from <Sx>, <Sy>, <Sz>.
1677 !
1678 ! Note that here we orthonormalize the orbital part. This ensures that the spinors
1679 ! are untouched later in the general orthonormalization, and therefore the spin values
1680 ! of each spinor remain the same.
1681 safe_allocate(zpsi2(1:mesh%np))
1682 do jst = ist_start, ist - 1
1683 call states_elec_get_state(st, mesh, 1, jst, ik, zpsi2)
1684 zpsi(1:mesh%np, 1) = zpsi(1:mesh%np, 1) - zmf_dotp(mesh, zpsi(:, 1), zpsi2)*zpsi2(1:mesh%np)
1685 end do
1686 safe_deallocate_a(zpsi2)
1687
1688 call zmf_normalize(mesh, 1, zpsi)
1689 zpsi(1:mesh%np, 2) = zpsi(1:mesh%np, 1)
1690
1691 alpha = cmplx(sqrt(m_half + st%spin(3, ist, ik)), m_zero, real64)
1692 beta = cmplx(sqrt(m_one - abs(alpha)**2), m_zero, real64)
1693 if (abs(alpha) > m_zero) then
1694 beta = cmplx(st%spin(1, ist, ik) / abs(alpha), st%spin(2, ist, ik) / abs(alpha), real64)
1695 end if
1696 zpsi(1:mesh%np, 1) = alpha*zpsi(1:mesh%np, 1)
1697 zpsi(1:mesh%np, 2) = beta*zpsi(1:mesh%np, 2)
1698 call states_elec_set_state(st, mesh, ist, ik, zpsi)
1699 end do
1700 end do
1701 else
1702 do ik = ikpt_start, ikpt_end
1703 do ist = ist_start, ist_end
1704 do id = 1, st%d%dim
1705 if (st%randomization == par_independent) then
1706 call zmf_random(mesh, zpsi(:, id), &
1707 pre_shift = mesh%pv%xlocal-1, &
1708 post_shift = mesh%pv%np_global - mesh%pv%xlocal - mesh%np + 1, &
1709 normalized = .false.)
1710 ! Ensures that the grid points are properly distributed in the domain parallel case
1711 if(mesh%parallel_in_domains) then
1712 call batch_init(ffb, zpsi(:, id))
1713 call zmesh_batch_exchange_points(mesh, ffb, backward_map = .true.)
1714 call ffb%end()
1715 end if
1716 else
1717 call zmf_random(mesh, zpsi(:, id), normalized = .false.)
1718 end if
1719 end do
1720 ! We need to generate the wave functions even if not using them in order to be consistent with the random seed in parallel runs.
1721 if (.not. state_kpt_is_local(st, ist, ik)) cycle
1722 ! Note that mf_random normalizes each spin channel independently to 1.
1723 ! Therefore we need to renormalize the spinor:
1724 if (normalized_) call zmf_normalize(mesh, st%d%dim, zpsi)
1725 call states_elec_set_state(st, mesh, ist, ik, zpsi)
1726 end do
1727 end do
1728 end if
1729
1730 end select
1731
1732 safe_deallocate_a(dpsi)
1733 safe_deallocate_a(zpsi)
1734
1736 end subroutine states_elec_generate_random
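
! Illustrative sketch, not part of the Octopus sources: the project-and-normalize step used above
! for the orbital part of the spinors, written as a standalone program for plain complex arrays
! with a unit integration weight; in the routine above, zmf_dotp and zmf_normalize carry the mesh
! volume elements. All names below are hypothetical.
program demo_orthonormalize
  implicit none
  integer, parameter :: dp = kind(1.0d0), n = 1000
  complex(dp) :: psi(n), prev(n)
  real(dp) :: re(n), im(n)

  ! a previously generated, normalized state
  call random_number(re); call random_number(im)
  prev = cmplx(re, im, dp)
  prev = prev/sqrt(sum(abs(prev)**2))

  ! a fresh random state
  call random_number(re); call random_number(im)
  psi = cmplx(re, im, dp)

  ! project out the previous state, then normalize
  psi = psi - sum(conjg(prev)*psi)*prev
  psi = psi/sqrt(sum(abs(psi)**2))

  print '(a,2es12.4)', 'residual overlap with prev: ', sum(conjg(prev)*psi)
end program demo_orthonormalize
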
1737
1738 ! ---------------------------------------------------------
1740 !
1741 subroutine states_elec_fermi(st, namespace, mesh, compute_spin)
1742 type(states_elec_t), intent(inout) :: st
1743 type(namespace_t), intent(in) :: namespace
1744 class(mesh_t), intent(in) :: mesh
1745 logical, optional, intent(in) :: compute_spin
1746
1748 integer :: ist, ik
1749 real(real64) :: charge
1750 complex(real64), allocatable :: zpsi(:, :)
1751
1752 push_sub(states_elec_fermi)
1753
1754 call smear_find_fermi_energy(st%smear, namespace, st%eigenval, st%occ, st%qtot, &
1755 st%nik, st%nst, st%kweights)
1756
1757 call smear_fill_occupations(st%smear, st%eigenval, st%occ, st%nik, st%nst)
1758
1759 ! check if everything is OK
1760 charge = m_zero
1761 do ist = 1, st%nst
1762 charge = charge + sum(st%occ(ist, 1:st%nik) * st%kweights(1:st%nik))
1763 end do
1764 if (abs(charge-st%qtot) > 1e-6_real64) then
1765 message(1) = 'Occupations do not integrate to total charge.'
1766 write(message(2), '(6x,f12.8,a,f12.8)') charge, ' != ', st%qtot
1767 call messages_warning(2, namespace=namespace)
1768 if (charge < m_epsilon) then
1769 message(1) = "There don't seem to be any electrons at all!"
1770 call messages_fatal(1, namespace=namespace)
1771 end if
1772 end if
1773
1774 if (st%d%ispin == spinors .and. optional_default(compute_spin,.true.)) then
1775 assert(states_are_complex(st))
1776
1777 st%spin(:,:,:) = m_zero
1778
1779 safe_allocate(zpsi(1:mesh%np, st%d%dim))
1780 do ik = st%d%kpt%start, st%d%kpt%end
1781 do ist = st%st_start, st%st_end
1782 call states_elec_get_state(st, mesh, ist, ik, zpsi)
1783 st%spin(1:3, ist, ik) = state_spin(mesh, zpsi)
1784 end do
1785 end do
1786 safe_deallocate_a(zpsi)
1787
1788 if (st%parallel_in_states .or. st%d%kpt%parallel) then
1789 call comm_allreduce(st%st_kpt_mpi_grp, st%spin)
1790 end if
1791
1792 end if
1793
1794 pop_sub(states_elec_fermi)
1795 end subroutine states_elec_fermi
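
! Illustrative sketch, not part of the Octopus sources: what determining the Fermi level amounts to
! for simple Fermi-Dirac smearing -- bisect the chemical potential until the k-weighted occupations
! integrate to the requested charge. The real work above is done by smear_find_fermi_energy and
! smear_fill_occupations, which support several smearing schemes; everything below (names, the
! spin-unpolarized occupations between 0 and 2, the made-up eigenvalues) is a simplification.
program demo_fermi_bisection
  implicit none
  integer, parameter :: dp = kind(1.0d0), nst = 8, nik = 4
  real(dp) :: eig(nst, nik), occ(nst, nik), kw(nik)
  real(dp) :: qtot, mu, mu_lo, mu_hi, beta, charge
  integer :: it

  call random_number(eig)
  eig = 2.0_dp*eig - 1.0_dp          ! fake eigenvalues in [-1, 1]
  kw = 1.0_dp/real(nik, dp)          ! uniform k-point weights
  qtot = 6.0_dp                      ! target number of electrons
  beta = 1.0_dp/0.01_dp              ! inverse smearing temperature

  mu_lo = minval(eig) - 1.0_dp
  mu_hi = maxval(eig) + 1.0_dp
  do it = 1, 60
    mu = 0.5_dp*(mu_lo + mu_hi)
    occ = 2.0_dp/(exp(beta*(eig - mu)) + 1.0_dp)    ! factor 2: spin degeneracy
    charge = dot_product(sum(occ, dim = 1), kw)     ! sum_k w_k sum_i occ(i, k)
    if (charge < qtot) then
      mu_lo = mu
    else
      mu_hi = mu
    end if
  end do
  print '(2(a,f10.6))', 'Fermi level: ', mu, '   integrated charge: ', charge
end program demo_fermi_bisection
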
1796
1797
1798 ! ---------------------------------------------------------
1800 !
1801 function states_elec_eigenvalues_sum(st, alt_eig) result(tot)
1802 type(states_elec_t), intent(in) :: st
1803 real(real64), optional, intent(in) :: alt_eig(st%st_start:, st%d%kpt%start:)
1805 real(real64) :: tot
1806
1807 integer :: ik
1808
1810
1811 tot = m_zero
1812 do ik = st%d%kpt%start, st%d%kpt%end
1813 if (present(alt_eig)) then
1814 tot = tot + st%kweights(ik) * sum(st%occ(st%st_start:st%st_end, ik) * &
1815 alt_eig(st%st_start:st%st_end, ik))
1816 else
1817 tot = tot + st%kweights(ik) * sum(st%occ(st%st_start:st%st_end, ik) * &
1818 st%eigenval(st%st_start:st%st_end, ik))
1819 end if
1820 end do
1821
1822 if (st%parallel_in_states .or. st%d%kpt%parallel) call comm_allreduce(st%st_kpt_mpi_grp, tot)
1823
1825 end function states_elec_eigenvalues_sum
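
! Illustrative sketch, not part of the Octopus sources: the quantity evaluated above,
! sum_k w_k sum_i occ(i,k)*eps(i,k), written for plain arrays and without the parallel reduction.
! All numbers are invented.
program demo_eigenvalue_sum
  implicit none
  integer, parameter :: dp = kind(1.0d0), nst = 5, nik = 3
  real(dp) :: eig(nst, nik), occ(nst, nik), kw(nik), tot
  integer :: ik

  call random_number(eig)
  occ = 2.0_dp                        ! pretend every state is doubly occupied
  kw = 1.0_dp/real(nik, dp)           ! uniform k-point weights

  tot = 0.0_dp
  do ik = 1, nik
    tot = tot + kw(ik)*sum(occ(:, ik)*eig(:, ik))
  end do
  print '(a,f12.6)', 'occupation-weighted eigenvalue sum: ', tot
end program demo_eigenvalue_sum
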
1826
1827
1829 subroutine states_elec_distribute_nodes(st, namespace, mc)
1830 type(states_elec_t), intent(inout) :: st
1831 type(namespace_t), intent(in) :: namespace
1832 type(multicomm_t), intent(in) :: mc
1833
1834 logical :: default_scalapack_compatible
1835
1837
1838 ! TODO(Alex) Issue #820. This is superfluous. These defaults are already set in the
1839 ! initialisation of the states object, and in the state distribution instance.
1840 ! Defaults.
1841 st%node(:) = 0
1842 st%st_start = 1
1843 st%st_end = st%nst
1844 st%lnst = st%nst
1845 st%parallel_in_states = .false.
1846
1847 call mpi_grp_init(st%mpi_grp, mc%group_comm(p_strategy_states))
1848 call mpi_grp_init(st%dom_st_kpt_mpi_grp, mc%dom_st_kpt_comm)
1849 call mpi_grp_init(st%dom_st_mpi_grp, mc%dom_st_comm)
1850 call mpi_grp_init(st%st_kpt_mpi_grp, mc%st_kpt_comm)
1851
1852 default_scalapack_compatible = calc_mode_par%scalapack_compat() .and. .not. st%d%kpt%parallel
1853
1854 !%Variable ScaLAPACKCompatible
1855 !%Type logical
1856 !%Section Execution::Parallelization
1857 !%Description
1858 !% Whether to use a layout for states parallelization which is compatible with ScaLAPACK.
1859 !% The default is yes for <tt>CalculationMode = gs, unocc, go</tt> without k-point parallelization,
1860 !% and no otherwise. (Setting to other than default is experimental.)
1861 !% The value must be yes if any ScaLAPACK routines are called in the course of the run;
1862 !% it must be set by hand for <tt>td</tt> with <tt>TDDynamics = bo</tt>.
1863 !% This variable has no effect unless you are using states parallelization and have linked ScaLAPACK.
1864 !% Note: currently, use of ScaLAPACK is not compatible with task parallelization (<i>i.e.</i> slaves).
1865 !%End
1866 call parse_variable(namespace, 'ScaLAPACKCompatible', default_scalapack_compatible, st%scalapack_compatible)
1867
1868#ifdef HAVE_SCALAPACK
1869 if (default_scalapack_compatible .neqv. st%scalapack_compatible) then
1870 call messages_experimental('Setting ScaLAPACKCompatible to other than default', namespace=namespace)
1871 end if
1872
1873 if (st%scalapack_compatible) then
1874 if (multicomm_have_slaves(mc)) then
1875 call messages_not_implemented("ScaLAPACK usage with task parallelization (slaves)", namespace=namespace)
1876 end if
1877 call blacs_proc_grid_init(st%dom_st_proc_grid, st%dom_st_mpi_grp)
1878 end if
1879#else
1880 st%scalapack_compatible = .false.
1881#endif
1882
1883 if (multicomm_strategy_is_parallel(mc, p_strategy_states)) then
1884
1885#ifdef HAVE_MPI
1886 call multicomm_create_all_pairs(st%mpi_grp, st%ap)
1887#endif
1888
1889 if (st%nst < st%mpi_grp%size) then
1890 message(1) = "Have more processors than necessary"
1891 write(message(2),'(i4,a,i4,a)') st%mpi_grp%size, " processors and ", st%nst, " states."
1892 call messages_fatal(2, namespace=namespace)
1893 end if
1895 call distributed_init(st%dist, st%nst, st%mpi_grp%comm, "states", scalapack_compat = st%scalapack_compatible)
1896
1897 st%parallel_in_states = st%dist%parallel
1898
1899 ! TODO(Alex) Issue #820. Remove lnst, st_start, st_end and node, as they are all contained within dist
1900 st%st_start = st%dist%start
1901 st%st_end = st%dist%end
1902 st%lnst = st%dist%nlocal
1903 st%node(1:st%nst) = st%dist%node(1:st%nst)
1904
1905 end if
1906
1908
1910 end subroutine states_elec_distribute_nodes
1911
1912
1918 !
1919 subroutine states_elec_calc_quantities(gr, st, kpoints, nlcc, &
1920 kinetic_energy_density, paramagnetic_current, density_gradient, density_laplacian, &
1921 gi_kinetic_energy_density, st_end)
1922 type(grid_t), intent(in) :: gr
1923 type(states_elec_t), intent(in) :: st
1924 type(kpoints_t), intent(in) :: kpoints
1925 logical, intent(in) :: nlcc
1926 real(real64), optional, target, intent(out) :: kinetic_energy_density(:,:)
1927 real(real64), optional, target, intent(out) :: paramagnetic_current(:,:,:)
1928 real(real64), optional, intent(out) :: density_gradient(:,:,:)
1929 real(real64), contiguous, optional, intent(out) :: density_laplacian(:,:)
1930 real(real64), optional, intent(out) :: gi_kinetic_energy_density(:,:)
1931 integer, optional, intent(in) :: st_end
1932
1933 real(real64), pointer :: jp(:, :, :)
1934 real(real64), pointer :: tau(:, :)
1935 complex(real64), allocatable :: wf_psi(:,:), gwf_psi(:,:,:), wf_psi_conj(:,:), lwf_psi(:,:)
1936 real(real64), allocatable :: abs_wf_psi(:,:), abs_gwf_psi(:,:)
1937 complex(real64), allocatable :: psi_gpsi(:,:)
1938 complex(real64) :: c_tmp
1939 integer :: is, ik, ist, i_dim, st_dim, ii, st_end_
1940 real(real64) :: ww, kpoint(gr%der%dim)
1941 logical :: something_to_do
1942
1943 call profiling_in("STATES_CALC_QUANTITIES")
1944
1946
1947 st_end_ = min(st%st_end, optional_default(st_end, st%st_end))
1948
1949 something_to_do = present(kinetic_energy_density) .or. present(gi_kinetic_energy_density) .or. &
1950 present(paramagnetic_current) .or. present(density_gradient) .or. present(density_laplacian)
1951 assert(something_to_do)
1952
1953 safe_allocate( wf_psi(1:gr%np_part, 1:st%d%dim))
1954 safe_allocate( wf_psi_conj(1:gr%np_part, 1:st%d%dim))
1955 safe_allocate(gwf_psi(1:gr%np, 1:gr%der%dim, 1:st%d%dim))
1956 safe_allocate(abs_wf_psi(1:gr%np, 1:st%d%dim))
1957 safe_allocate(abs_gwf_psi(1:gr%np, 1:st%d%dim))
1958 safe_allocate(psi_gpsi(1:gr%np, 1:st%d%dim))
1959 if (present(density_laplacian)) then
1960 safe_allocate(lwf_psi(1:gr%np, 1:st%d%dim))
1961 end if
1962
1963 nullify(tau)
1964 if (present(kinetic_energy_density)) tau => kinetic_energy_density
1965
1966 nullify(jp)
1967 if (present(paramagnetic_current)) jp => paramagnetic_current
1968
1969 ! for the gauge-invariant kinetic energy density we need the
1970 ! current and the kinetic energy density
1971 if (present(gi_kinetic_energy_density)) then
1972 if (.not. present(paramagnetic_current) .and. states_are_complex(st)) then
1973 safe_allocate(jp(1:gr%np, 1:gr%der%dim, 1:st%d%nspin))
1974 end if
1975 if (.not. present(kinetic_energy_density)) then
1976 safe_allocate(tau(1:gr%np, 1:st%d%nspin))
1977 end if
1978 end if
1979
1980 if (associated(tau)) tau = m_zero
1981 if (associated(jp)) jp = m_zero
1982 if (present(density_gradient)) density_gradient(:,:,:) = m_zero
1983 if (present(density_laplacian)) density_laplacian(:,:) = m_zero
1984 if (present(gi_kinetic_energy_density)) gi_kinetic_energy_density = m_zero
1985
1986 do ik = st%d%kpt%start, st%d%kpt%end
1987
1988 kpoint(1:gr%der%dim) = kpoints%get_point(st%d%get_kpoint_index(ik))
1989 is = st%d%get_spin_index(ik)
1990
1991 do ist = st%st_start, st_end_
1992 ww = st%kweights(ik)*st%occ(ist, ik)
1993 if (abs(ww) <= m_epsilon) cycle
1994
1995 ! all calculations will be done with complex wavefunctions
1996 call states_elec_get_state(st, gr, ist, ik, wf_psi)
1997
1998 do st_dim = 1, st%d%dim
1999 call boundaries_set(gr%der%boundaries, gr, wf_psi(:, st_dim))
2000 end do
2001
2002 ! calculate gradient of the wavefunction
2003 do st_dim = 1, st%d%dim
2004 call zderivatives_grad(gr%der, wf_psi(:,st_dim), gwf_psi(:,:,st_dim), set_bc = .false.)
2005 end do
2006
2007 ! calculate the Laplacian of the wavefunction
2008 if (present(density_laplacian)) then
2009 do st_dim = 1, st%d%dim
2010 call zderivatives_lapl(gr%der, wf_psi(:,st_dim), lwf_psi(:,st_dim), ghost_update = .false., set_bc = .false.)
2011 end do
2012 end if
2013
2014 ! We precompute some quantities to avoid computing them many times
2015 wf_psi_conj(1:gr%np, 1:st%d%dim) = conjg(wf_psi(1:gr%np,1:st%d%dim))
2016 abs_wf_psi(1:gr%np, 1:st%d%dim) = real(wf_psi_conj(1:gr%np, 1:st%d%dim) * wf_psi(1:gr%np, 1:st%d%dim), real64)
2017
2018 if (present(density_laplacian)) then
2019 density_laplacian(1:gr%np, is) = density_laplacian(1:gr%np, is) + &
2020 ww * m_two*real(wf_psi_conj(1:gr%np, 1) * lwf_psi(1:gr%np, 1), real64)
2021 if (st%d%ispin == spinors) then
2022 density_laplacian(1:gr%np, 2) = density_laplacian(1:gr%np, 2) + &
2023 ww * m_two*real(wf_psi_conj(1:gr%np, 2) * lwf_psi(1:gr%np, 2), real64)
2024 !$omp parallel do private(c_tmp)
2025 do ii = 1, gr%np
2026 c_tmp = ww*(lwf_psi(ii, 1) * wf_psi_conj(ii, 2) + wf_psi(ii, 1) * conjg(lwf_psi(ii, 2)))
2027 density_laplacian(ii, 3) = density_laplacian(ii, 3) + real(c_tmp, real64)
2028 density_laplacian(ii, 4) = density_laplacian(ii, 4) + aimag(c_tmp)
2029 end do
2030 end if
2031 end if
2032
2033 if (associated(tau)) then
2034 tau(1:gr%np, is) = tau(1:gr%np, is) &
2035 + ww * sum(kpoint(1:gr%der%dim)**2) * abs_wf_psi(1:gr%np, 1)
2036 if (st%d%ispin == spinors) then
2037 tau(1:gr%np, 2) = tau(1:gr%np, 2) &
2038 + ww * sum(kpoint(1:gr%der%dim)**2) * abs_wf_psi(1:gr%np, 2)
2039
2040 !$omp parallel do private(c_tmp)
2041 do ii = 1, gr%np
2042 c_tmp = ww * sum(kpoint(1:gr%der%dim)**2) * wf_psi(ii, 1) * wf_psi_conj(ii, 2)
2043 tau(ii, 3) = tau(ii, 3) + real(c_tmp, real64)
2044 tau(ii, 4) = tau(ii, 4) + aimag(c_tmp)
2045 end do
2046 end if
2047 end if
2048
2049 do i_dim = 1, gr%der%dim
2050
2051 ! We precompute some quantities to avoid computing them many times
2052 psi_gpsi(1:gr%np, 1:st%d%dim) = wf_psi_conj(1:gr%np, 1:st%d%dim) &
2053 * gwf_psi(1:gr%np,i_dim,1:st%d%dim)
2054 abs_gwf_psi(1:gr%np, 1:st%d%dim) = real(conjg(gwf_psi(1:gr%np, i_dim, 1:st%d%dim)) &
2055 * gwf_psi(1:gr%np, i_dim, 1:st%d%dim), real64)
2056
2057 if (present(density_gradient)) then
2058 density_gradient(1:gr%np, i_dim, is) = density_gradient(1:gr%np, i_dim, is) &
2059 + ww * m_two * real(psi_gpsi(1:gr%np, 1), real64)
2060 if (st%d%ispin == spinors) then
2061 density_gradient(1:gr%np, i_dim, 2) = density_gradient(1:gr%np, i_dim, 2) &
2062 + ww * m_two*real(psi_gpsi(1:gr%np, 2), real64)
2063 !$omp parallel do private(c_tmp)
2064 do ii = 1, gr%np
2065 c_tmp = ww * (gwf_psi(ii, i_dim, 1) * wf_psi_conj(ii, 2) + wf_psi(ii, 1) * conjg(gwf_psi(ii, i_dim, 2)))
2066 density_gradient(ii, i_dim, 3) = density_gradient(ii, i_dim, 3) + real(c_tmp, real64)
2067 density_gradient(ii, i_dim, 4) = density_gradient(ii, i_dim, 4) + aimag(c_tmp)
2068 end do
2069 end if
2070 end if
2071
2072 if (present(density_laplacian)) then
2073 call lalg_axpy(gr%np, ww*m_two, abs_gwf_psi(:,1), density_laplacian(:,is))
2074 if (st%d%ispin == spinors) then
2075 call lalg_axpy(gr%np, ww*m_two, abs_gwf_psi(:,2), density_laplacian(:,2))
2076 !$omp parallel do private(c_tmp)
2077 do ii = 1, gr%np
2078 c_tmp = m_two * ww * gwf_psi(ii, i_dim, 1) * conjg(gwf_psi(ii, i_dim, 2))
2079 density_laplacian(ii, 3) = density_laplacian(ii, 3) + real(c_tmp, real64)
2080 density_laplacian(ii, 4) = density_laplacian(ii, 4) + aimag(c_tmp)
2081 end do
2082 end if
2083 end if
2084
2085 ! the expression for the paramagnetic current with spinors is
2086 ! j = ( jp(1) jp(3) + i jp(4) )
2087 ! (-jp(3) + i jp(4) jp(2) )
2088 if (associated(jp)) then
2089 if (.not.(states_are_real(st))) then
2090 jp(1:gr%np, i_dim, is) = jp(1:gr%np, i_dim, is) + &
2091 ww*(aimag(psi_gpsi(1:gr%np, 1)) - abs_wf_psi(1:gr%np, 1)*kpoint(i_dim))
2092 if (st%d%ispin == spinors) then
2093 jp(1:gr%np, i_dim, 2) = jp(1:gr%np, i_dim, 2) + &
2094 ww*( aimag(psi_gpsi(1:gr%np, 2)) - abs_wf_psi(1:gr%np, 2)*kpoint(i_dim))
2095 !$omp parallel do private(c_tmp)
2096 do ii = 1, gr%np
2097 c_tmp = -ww*m_half*m_zi*(gwf_psi(ii, i_dim, 1)*wf_psi_conj(ii, 2) - wf_psi(ii, 1)*conjg(gwf_psi(ii, i_dim, 2)) &
2098 - m_two * m_zi*wf_psi(ii, 1)*wf_psi_conj(ii, 2)*kpoint(i_dim))
2099 jp(ii, i_dim, 3) = jp(ii, i_dim, 3) + real(c_tmp, real64)
2100 jp(ii, i_dim, 4) = jp(ii, i_dim, 4) + aimag(c_tmp)
2101 end do
2102 end if
2103 end if
2104 end if
2105
2106 ! similarly, the expression for the kinetic-energy density with spinors is
2107 ! t = ( tau(1) tau(3) + i tau(4) )
2108 ! ( tau(3) - i tau(4) tau(2) )
2109 if (associated(tau)) then
2110 tau(1:gr%np, is) = tau(1:gr%np, is) + ww*(abs_gwf_psi(1:gr%np,1) &
2111 - m_two*aimag(psi_gpsi(1:gr%np, 1))*kpoint(i_dim))
2112 if (st%d%ispin == spinors) then
2113 tau(1:gr%np, 2) = tau(1:gr%np, 2) + ww*(abs_gwf_psi(1:gr%np, 2) &
2114 - m_two*aimag(psi_gpsi(1:gr%np, 2))*kpoint(i_dim))
2115 !$omp parallel do private(c_tmp)
2116 do ii = 1, gr%np
2117 c_tmp = ww * ( gwf_psi(ii, i_dim, 1)*conjg(gwf_psi(ii, i_dim, 2)) &
2118 + m_zi * (gwf_psi(ii,i_dim,1)*wf_psi_conj(ii, 2) &
2119 - wf_psi(ii, 1)*conjg(gwf_psi(ii,i_dim,2)))*kpoint(i_dim))
2120 tau(ii, 3) = tau(ii, 3) + real(c_tmp, real64)
2121 tau(ii, 4) = tau(ii, 4) + aimag(c_tmp)
2122 end do
2123 end if
2124 end if
2125
2126 end do
2127
2128 end do
2129 end do
2130
2131 safe_deallocate_a(wf_psi_conj)
2132 safe_deallocate_a(abs_wf_psi)
2133 safe_deallocate_a(abs_gwf_psi)
2134 safe_deallocate_a(psi_gpsi)
2135
2136 if (.not. present(gi_kinetic_energy_density)) then
2137 if (.not. present(paramagnetic_current)) then
2138 safe_deallocate_p(jp)
2139 end if
2140 if (.not. present(kinetic_energy_density)) then
2141 safe_deallocate_p(tau)
2142 end if
2143 end if
2144
2145 if (st%parallel_in_states .or. st%d%kpt%parallel) call reduce_all(st%st_kpt_mpi_grp)
2146
2147 ! We have to symmetrize all these quantities, as they are calculated from the
2148 ! wavefunctions.
2149 ! This must be done before computing the gauge-invariant kinetic energy density.
2150 if (st%symmetrize_density) then
2151 do is = 1, st%d%nspin
2152 if (associated(tau)) then
2153 call dgrid_symmetrize_scalar_field(gr, tau(:, is), suppress_warning = .true.)
2154 end if
2155
2156 if (present(density_laplacian)) then
2157 call dgrid_symmetrize_scalar_field(gr, density_laplacian(:, is), suppress_warning = .true.)
2158 end if
2159
2160 if (associated(jp)) then
2161 call dgrid_symmetrize_vector_field(gr, jp(:, :, is), suppress_warning = .true.)
2162 end if
2163
2164 if (present(density_gradient)) then
2165 call dgrid_symmetrize_vector_field(gr, density_gradient(:, :, is), suppress_warning = .true.)
2166 end if
2167 end do
2168 end if
2169
2170
2171 if (allocated(st%rho_core) .and. nlcc .and. (present(density_laplacian) .or. present(density_gradient))) then
2172 do ii = 1, gr%np
2173 wf_psi(ii, 1) = st%rho_core(ii)/st%d%spin_channels
2174 end do
2175
2176 call boundaries_set(gr%der%boundaries, gr, wf_psi(:, 1))
2177
2178 if (present(density_gradient)) then
2179 ! calculate gradient of the NLCC
2180 call zderivatives_grad(gr%der, wf_psi(:,1), gwf_psi(:,:,1), set_bc = .false.)
2181 do is = 1, st%d%spin_channels
2182 density_gradient(1:gr%np, 1:gr%der%dim, is) = density_gradient(1:gr%np, 1:gr%der%dim, is) + &
2183 real(gwf_psi(1:gr%np, 1:gr%der%dim,1))
2184 end do
2185 end if
2186
2187 ! calculate the Laplacian of the wavefunction
2188 if (present(density_laplacian)) then
2189 call zderivatives_lapl(gr%der, wf_psi(:,1), lwf_psi(:,1), set_bc = .false.)
2190
2191 do is = 1, st%d%spin_channels
2192 density_laplacian(1:gr%np, is) = density_laplacian(1:gr%np, is) + real(lwf_psi(1:gr%np, 1))
2193 end do
2194 end if
2195 end if
2196
2197 ! If some of the orbitals are frozen, we need to add their contributions here.
2198 ! This is done only when the quantities are computed for the full set of states (st_end not given).
2199 if (allocated(st%frozen_tau) .and. .not. present(st_end)) then
2200 call lalg_axpy(gr%np, st%d%nspin, m_one, st%frozen_tau, tau)
2201 end if
2202 if (allocated(st%frozen_gdens) .and. .not. present(st_end)) then
2203 do is = 1, st%d%nspin
2204 call lalg_axpy(gr%np, gr%der%dim, m_one, st%frozen_gdens(:,:,is), density_gradient(:,:,is))
2205 end do
2206 end if
2207 if (allocated(st%frozen_ldens) .and. .not. present(st_end)) then
2208 call lalg_axpy(gr%np, st%d%nspin, m_one, st%frozen_ldens, density_laplacian)
2209 end if
2210
2211 safe_deallocate_a(wf_psi)
2212 safe_deallocate_a(gwf_psi)
2213 safe_deallocate_a(lwf_psi)
2214
2215
2216 !We compute the gauge-invariant kinetic energy density
2217 if (present(gi_kinetic_energy_density)) then
2218 do is = 1, st%d%nspin
2219 assert(associated(tau))
2220 gi_kinetic_energy_density(1:gr%np, is) = tau(1:gr%np, is)
2221 end do
2222 if (states_are_complex(st)) then
2223 assert(associated(jp))
2224 if (st%d%ispin /= spinors) then
2225 do is = 1, st%d%nspin
2226 !$omp parallel do
2227 do ii = 1, gr%np
2228 if (st%rho(ii, is) < 1.0e-7_real64) cycle
2229 gi_kinetic_energy_density(ii, is) = &
2230 gi_kinetic_energy_density(ii, is) - sum(jp(ii,1:gr%der%dim, is)**2)/st%rho(ii, is)
2231 end do
2232 end do
2233 else ! Note that this is only the U(1) part of the gauge-invariant KED
2234 !$omp parallel do
2235 do ii = 1, gr%np
2236 gi_kinetic_energy_density(ii, 1) = gi_kinetic_energy_density(ii, 1) &
2237 - sum(jp(ii,1:gr%der%dim, 1)**2)/(safe_tol(st%rho(ii, 1),m_epsilon))
2238 gi_kinetic_energy_density(ii, 2) = gi_kinetic_energy_density(ii, 2) &
2239 - sum(jp(ii,1:gr%der%dim, 2)**2)/(safe_tol(st%rho(ii, 2),m_epsilon))
2240 gi_kinetic_energy_density(ii, 3) = &
2241 gi_kinetic_energy_density(ii, 3) - sum(jp(ii,1:gr%der%dim, 3)**2 + jp(ii,1:gr%der%dim, 4)**2) &
2242 /(safe_tol((st%rho(ii, 3)**2 + st%rho(ii, 4)**2), m_epsilon))*st%rho(ii, 3)
2243 gi_kinetic_energy_density(ii, 4) = &
2244 gi_kinetic_energy_density(ii, 4) + sum(jp(ii,1:gr%der%dim, 3)**2 + jp(ii,1:gr%der%dim, 4)**2) &
2245 /(safe_tol((st%rho(ii, 3)**2 + st%rho(ii, 4)**2), m_epsilon))*st%rho(ii, 4)
2246 end do
2247 end if
2248 end if
2249 end if
2250
2251 if (.not. present(kinetic_energy_density)) then
2252 safe_deallocate_p(tau)
2253 end if
2254 if (.not. present(paramagnetic_current)) then
2255 safe_deallocate_p(jp)
2256 end if
2257
2258
2260
2261 call profiling_out("STATES_CALC_QUANTITIES")
2262
2263 contains
2264
2265 subroutine reduce_all(grp)
2266 type(mpi_grp_t), intent(in) :: grp
2267
2269
2270 if (associated(tau)) call comm_allreduce(grp, tau, dim = (/gr%np, st%d%nspin/))
2271
2272 if (present(density_laplacian)) call comm_allreduce(grp, density_laplacian, dim = (/gr%np, st%d%nspin/))
2273
2274 do is = 1, st%d%nspin
2275 if (associated(jp)) call comm_allreduce(grp, jp(:, :, is), dim = (/gr%np, gr%der%dim/))
2276
2277 if (present(density_gradient)) then
2278 call comm_allreduce(grp, density_gradient(:, :, is), dim = (/gr%np, gr%der%dim/))
2279 end if
2280 end do
2281
2283 end subroutine reduce_all
2284
2285 end subroutine states_elec_calc_quantities
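
! Illustrative sketch, not part of the Octopus sources: the simplest ingredient assembled above,
! the kinetic-energy density tau(r) = sum_i occ_i |grad psi_i(r)|^2, evaluated for one real 1D
! orbital with a central finite difference. The routine above instead uses zderivatives_grad on
! the real-space mesh and adds the k-point, spinor and current terms shown in the loops. All
! numbers below are made up.
program demo_kinetic_energy_density
  implicit none
  integer, parameter :: dp = kind(1.0d0), np = 1001
  real(dp) :: x(np), psi(np), gpsi(np), tau(np), dx
  integer :: ip

  dx = 0.02_dp
  do ip = 1, np
    x(ip) = (ip - (np + 1)/2)*dx
    psi(ip) = exp(-0.5_dp*x(ip)**2)              ! Gaussian orbital
  end do
  psi = psi/sqrt(sum(psi**2)*dx)                 ! normalize on the grid

  gpsi(1) = 0.0_dp
  gpsi(np) = 0.0_dp
  do ip = 2, np - 1
    gpsi(ip) = (psi(ip + 1) - psi(ip - 1))/(2.0_dp*dx)
  end do

  tau = gpsi**2                                  ! occupation 1 assumed
  print '(a,f10.6)', 'integrated tau (about 0.5 for this orbital): ', sum(tau)*dx
end program demo_kinetic_energy_density
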
2286
2287
2288 ! ---------------------------------------------------------
2290 !
2291 function state_spin(mesh, f1) result(spin)
2292 type(mesh_t), intent(in) :: mesh
2293 complex(real64), intent(in) :: f1(:, :)
2294 real(real64) :: spin(1:3)
2295
2296 complex(real64) :: z
2297
2298 push_sub(state_spin)
2299
2300 z = zmf_dotp(mesh, f1(:, 1) , f1(:, 2))
2301
2302 spin(1) = m_two*real(z, real64)
2303 spin(2) = m_two*aimag(z)
2304 spin(3) = zmf_nrm2(mesh, f1(:, 1))**2 - zmf_nrm2(mesh, f1(:, 2))**2
2305 spin = m_half*spin ! spin is half the sigma matrix.
2306
2307 pop_sub(state_spin)
2308 end function state_spin
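
! Illustrative sketch, not part of the Octopus sources: the spin expectation values returned by
! state_spin, written for a plain discrete spinor with a unit integration weight; zmf_dotp and
! zmf_nrm2 above carry the mesh volume elements instead.
program demo_state_spin
  implicit none
  integer, parameter :: dp = kind(1.0d0), n = 200
  complex(dp) :: up(n), dn(n), z
  real(dp) :: re(n), im(n), spin(3), nrm

  call random_number(re); call random_number(im); up = cmplx(re, im, dp)
  call random_number(re); call random_number(im); dn = cmplx(re, im, dp)

  nrm = sqrt(sum(abs(up)**2) + sum(abs(dn)**2))   ! normalize the spinor as a whole
  up = up/nrm
  dn = dn/nrm

  z = sum(conjg(up)*dn)                           ! <psi_1 | psi_2>
  spin(1) = real(z, dp)                           ! <S_x>
  spin(2) = aimag(z)                              ! <S_y>
  spin(3) = 0.5_dp*(sum(abs(up)**2) - sum(abs(dn)**2))   ! <S_z>

  print '(a,3f10.6)', '<Sx>, <Sy>, <Sz> = ', spin
end program demo_state_spin
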
2309
2310 ! ---------------------------------------------------------
2312 !
2313 logical function state_is_local(st, ist)
2314 type(states_elec_t), intent(in) :: st
2315 integer, intent(in) :: ist
2316
2317 push_sub(state_is_local)
2318
2319 state_is_local = ist >= st%st_start.and.ist <= st%st_end
2320
2321 pop_sub(state_is_local)
2322 end function state_is_local
2323
2324 ! ---------------------------------------------------------
2326 !
2327 logical function state_kpt_is_local(st, ist, ik)
2328 type(states_elec_t), intent(in) :: st
2329 integer, intent(in) :: ist
2330 integer, intent(in) :: ik
2331
2332 push_sub(state_kpt_is_local)
2333
2334 state_kpt_is_local = ist >= st%st_start .and. ist <= st%st_end .and. &
2335 ik >= st%d%kpt%start .and. ik <= st%d%kpt%end
2336
2337 pop_sub(state_kpt_is_local)
2338 end function state_kpt_is_local
2339
2340
2341 ! ---------------------------------------------------------
2343 real(real64) function states_elec_wfns_memory(st, mesh) result(memory)
2344 type(states_elec_t), intent(in) :: st
2345 class(mesh_t), intent(in) :: mesh
2346
2347 push_sub(states_elec_wfns_memory)
2348 memory = m_zero
2349
2350 ! orbitals
2351 memory = memory + real64*real(mesh%np_part_global, real64) *st%d%dim*real(st%nst, real64) *st%d%kpt%nglobal
2352
2354 end function states_elec_wfns_memory
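
! Illustrative sketch, not part of the Octopus sources: a back-of-the-envelope estimate in the
! same spirit as states_elec_wfns_memory -- one coefficient per mesh point, spinor dimension,
! state and k-point. The grid and state counts below are invented, and the 16 bytes per
! coefficient assume complex(real64) wavefunctions (8 for real ones).
program demo_wfn_memory
  implicit none
  integer, parameter :: dp = kind(1.0d0)
  integer(selected_int_kind(18)) :: np, dim, nst, nik, bytes_per_coeff

  np = 200000              ! mesh points, including boundary points
  dim = 1                  ! spinor dimension
  nst = 40                 ! number of states
  nik = 8                  ! number of k-points
  bytes_per_coeff = 16     ! complex(real64)

  print '(a,f10.1,a)', 'estimated wavefunction storage: ', &
    real(bytes_per_coeff*np*dim*nst*nik, dp)/1024.0_dp**2, ' MiB'
end program demo_wfn_memory
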
2355
2356 ! ---------------------------------------------------------
2359 subroutine states_elec_pack(st, copy)
2360 class(states_elec_t), intent(inout) :: st
2361 logical, optional, intent(in) :: copy
2362
2363 integer :: iqn, ib
2364 integer(int64) :: max_mem, mem
2365
2366 push_sub(states_elec_pack)
2367
2368 ! nothing to do, already packed
2369 if (st%packed) then
2370 pop_sub(states_elec_pack)
2371 return
2372 end if
2373
2374 st%packed = .true.
2375
2376 if (accel_is_enabled()) then
2377 max_mem = accel_global_memory_size()
2378
2379 if (st%cl_states_mem > m_one) then
2380 max_mem = int(st%cl_states_mem, int64)*(1024_8)**2
2381 else if (st%cl_states_mem < 0.0_real64) then
2382 max_mem = max_mem + int(st%cl_states_mem, int64)*(1024_8)**2
2383 else
2384 max_mem = int(st%cl_states_mem*real(max_mem, real64) , int64)
2385 end if
2386 else
2387 max_mem = huge(max_mem)
2388 end if
2389
2390 mem = 0
2391 qnloop: do iqn = st%d%kpt%start, st%d%kpt%end
2392 do ib = st%group%block_start, st%group%block_end
2393
2394 mem = mem + st%group%psib(ib, iqn)%pack_total_size()
2395
2396 if (mem > max_mem) then
2397 call messages_write('Not enough CL device memory to store all states simultaneously.', new_line = .true.)
2398 call messages_write('Only ')
2399 call messages_write(ib - st%group%block_start)
2400 call messages_write(' of ')
2401 call messages_write(st%group%block_end - st%group%block_start + 1)
2402 call messages_write(' blocks will be stored in device memory.', new_line = .true.)
2403 call messages_warning()
2404 exit qnloop
2405 end if
2407 call st%group%psib(ib, iqn)%do_pack(copy)
2408 end do
2409 end do qnloop
2410
2411 pop_sub(states_elec_pack)
2412 end subroutine states_elec_pack
2413
2414 ! ------------------------------------------------------------
2416 !
2417 subroutine states_elec_unpack(st, copy)
2418 class(states_elec_t), intent(inout) :: st
2419 logical, optional, intent(in) :: copy
2421 integer :: iqn, ib
2422
2423 push_sub(states_elec_unpack)
2424
2425 if (st%packed) then
2426 st%packed = .false.
2427
2428 do iqn = st%d%kpt%start, st%d%kpt%end
2429 do ib = st%group%block_start, st%group%block_end
2430 if (st%group%psib(ib, iqn)%is_packed()) call st%group%psib(ib, iqn)%do_unpack(copy)
2431 end do
2432 end do
2433 end if
2434
2435 pop_sub(states_elec_unpack)
2436 end subroutine states_elec_unpack
2437
2438 ! -----------------------------------------------------------
2440 !
2441 subroutine states_elec_write_info(st, namespace)
2442 class(states_elec_t), intent(in) :: st
2443 type(namespace_t), intent(in) :: namespace
2444
2445 push_sub(states_elec_write_info)
2446
2447 call messages_print_with_emphasis(msg="States", namespace=namespace)
2448
2449 write(message(1), '(a,f12.3)') 'Total electronic charge = ', st%qtot
2450 write(message(2), '(a,i8)') 'Number of states = ', st%nst
2451 write(message(3), '(a,i8)') 'States block-size = ', st%block_size
2452 call messages_info(3, namespace=namespace)
2453
2454 call messages_print_with_emphasis(namespace=namespace)
2455
2456 pop_sub(states_elec_write_info)
2457 end subroutine states_elec_write_info
2458
2459 ! ------------------------------------------------------------
2461 !
2462 subroutine states_elec_set_zero(st)
2463 class(states_elec_t), intent(inout) :: st
2464
2465 integer :: iqn, ib
2466
2467 push_sub(states_elec_set_zero)
2468
2469 do iqn = st%d%kpt%start, st%d%kpt%end
2470 do ib = st%group%block_start, st%group%block_end
2471 call batch_set_zero(st%group%psib(ib, iqn))
2472 end do
2473 end do
2474
2475 pop_sub(states_elec_set_zero)
2476 end subroutine states_elec_set_zero
2477
2478 ! ------------------------------------------------------------
2480 !
2481 integer pure function states_elec_block_min(st, ib) result(range)
2482 type(states_elec_t), intent(in) :: st
2483 integer, intent(in) :: ib
2484
2485 range = st%group%block_range(ib, 1)
2486 end function states_elec_block_min
2487
2488 ! ------------------------------------------------------------
2490 !
2491 integer pure function states_elec_block_max(st, ib) result(range)
2492 type(states_elec_t), intent(in) :: st
2493 integer, intent(in) :: ib
2494
2495 range = st%group%block_range(ib, 2)
2496 end function states_elec_block_max
2497
2498 ! ------------------------------------------------------------
2500 !
2501 integer pure function states_elec_block_size(st, ib) result(size)
2502 type(states_elec_t), intent(in) :: st
2503 integer, intent(in) :: ib
2504
2505 size = st%group%block_size(ib)
2506 end function states_elec_block_size
2507
2508 ! ---------------------------------------------------------
2511 subroutine states_elec_count_pairs(st, namespace, n_pairs, n_occ, n_unocc, is_included, is_frac_occ)
2512 type(states_elec_t), intent(in) :: st
2513 type(namespace_t), intent(in) :: namespace
2514 integer, intent(out) :: n_pairs
2515 integer, intent(out) :: n_occ(:)
2516 integer, intent(out) :: n_unocc(:)
2517 logical, allocatable, intent(out) :: is_included(:,:,:)
2519 logical, intent(out) :: is_frac_occ
2520
2521 integer :: ik, ist, ast, n_filled, n_partially_filled, n_half_filled
2522 character(len=80) :: nst_string, default, wfn_list
2523 real(real64) :: energy_window
2524
2525 push_sub(states_elec_count_pairs)
2526
2527 is_frac_occ = .false.
2528 do ik = 1, st%nik
2529 call occupied_states(st, namespace, ik, n_filled, n_partially_filled, n_half_filled)
2530 if (n_partially_filled > 0 .or. n_half_filled > 0) is_frac_occ = .true.
2531 n_occ(ik) = n_filled + n_partially_filled + n_half_filled
2532 n_unocc(ik) = st%nst - n_filled
2533 ! once fractional occupations are supported here, partially occupied levels need to be counted as both occupied and unoccupied.
2534 end do
2535
2536 !%Variable CasidaKSEnergyWindow
2537 !%Type float
2538 !%Section Linear Response::Casida
2539 !%Description
2540 !% An alternative to <tt>CasidaKohnShamStates</tt> for specifying which occupied-unoccupied
2541 !% transitions will be used: all those whose eigenvalue differences are less than this
2542 !% number will be included. If a value less than 0 is supplied, this criterion will not be used.
2543 !%End
2544
2545 call parse_variable(namespace, 'CasidaKSEnergyWindow', -m_one, energy_window, units_inp%energy)
2546
2547 !%Variable CasidaKohnShamStates
2548 !%Type string
2549 !%Section Linear Response::Casida
2550 !%Default all states
2551 !%Description
2552 !% The calculation of the excitation spectrum of a system in the Casida frequency-domain
2553 !% formulation of linear-response time-dependent density functional theory (TDDFT)
2554 !% implies the use of a basis set of occupied/unoccupied Kohn-Sham orbitals. This
2555 !% basis set should, in principle, include all pairs formed by all occupied states,
2556 !% and an infinite number of unoccupied states. In practice, one has to truncate this
2557 !% basis set, selecting a number of occupied and unoccupied states that will form the
2558 !% pairs. These states are specified with this variable. If there are, say, 15 occupied
2559 !% states, and one sets this variable to the value "10-18", this means that occupied
2560 !% states from 10 to 15, and unoccupied states from 16 to 18 will be considered.
2561 !%
2562 !% This variable is a string in list form, <i>i.e.</i> expressions such as "1,2-5,8-15" are
2563 !% valid. You should include a non-zero number of unoccupied states and a non-zero number
2564 !% of occupied states.
2565 !%End
2566
2567 n_pairs = 0
2568 safe_allocate(is_included(maxval(n_occ), minval(n_occ) + 1:st%nst , st%nik))
2569 is_included(:,:,:) = .false.
2570
2571 if (energy_window < m_zero) then
2572 write(nst_string,'(i6)') st%nst
2573 write(default,'(a,a)') "1-", trim(adjustl(nst_string))
2574 call parse_variable(namespace, 'CasidaKohnShamStates', default, wfn_list)
2575
2576 write(message(1),'(a,a)') "Info: States that form the basis: ", trim(wfn_list)
2577 call messages_info(1, namespace=namespace)
2578
2579 ! count pairs
2580 n_pairs = 0
2581 do ik = 1, st%nik
2582 do ast = n_occ(ik) + 1, st%nst
2583 if (loct_isinstringlist(ast, wfn_list)) then
2584 do ist = 1, n_occ(ik)
2585 if (loct_isinstringlist(ist, wfn_list)) then
2586 n_pairs = n_pairs + 1
2587 is_included(ist, ast, ik) = .true.
2588 end if
2589 end do
2590 end if
2591 end do
2592 end do
2593
2594 else ! using CasidaKSEnergyWindow
2595
2596 write(message(1),'(a,f12.6,a)') "Info: including transitions with energy < ", &
2597 units_from_atomic(units_out%energy, energy_window), trim(units_abbrev(units_out%energy))
2598 call messages_info(1, namespace=namespace)
2599
2600 ! count pairs
2601 n_pairs = 0
2602 do ik = 1, st%nik
2603 do ast = n_occ(ik) + 1, st%nst
2604 do ist = 1, n_occ(ik)
2605 if (st%eigenval(ast, ik) - st%eigenval(ist, ik) < energy_window) then
2606 n_pairs = n_pairs + 1
2607 is_included(ist, ast, ik) = .true.
2608 end if
2609 end do
2610 end do
2611 end do
2612
2613 end if
2614
2616 end subroutine states_elec_count_pairs
2617
2618
2634 !
2635 subroutine occupied_states(st, namespace, ik, n_filled, n_partially_filled, n_half_filled, &
2636 filled, partially_filled, half_filled)
2637 type(states_elec_t), intent(in) :: st
2638 type(namespace_t), intent(in) :: namespace
2639 integer, intent(in) :: ik
2640 integer, intent(out) :: n_filled, n_partially_filled, n_half_filled
2641 integer, optional, intent(out) :: filled(:), partially_filled(:), half_filled(:)
2642
2643 integer :: ist
2644
2645 push_sub(occupied_states)
2646
2647 if (present(filled)) filled(:) = 0
2648 if (present(partially_filled)) partially_filled(:) = 0
2649 if (present(half_filled)) half_filled(:) = 0
2650 n_filled = 0
2651 n_partially_filled = 0
2652 n_half_filled = 0
2653
2654 select case (st%d%ispin)
2655 case (unpolarized)
2656 do ist = 1, st%nst
2657 if (abs(st%occ(ist, ik) - m_two) < m_min_occ) then
2658 n_filled = n_filled + 1
2659 if (present(filled)) filled(n_filled) = ist
2660 else if (abs(st%occ(ist, ik) - m_one) < m_min_occ) then
2661 n_half_filled = n_half_filled + 1
2662 if (present(half_filled)) half_filled(n_half_filled) = ist
2663 else if (st%occ(ist, ik) > m_min_occ) then
2664 n_partially_filled = n_partially_filled + 1
2665 if (present(partially_filled)) partially_filled(n_partially_filled) = ist
2666 else if (abs(st%occ(ist, ik)) > m_min_occ) then
2667 write(message(1),*) 'Internal error in occupied_states: Illegal occupation value ', st%occ(ist, ik)
2668 call messages_fatal(1, namespace=namespace)
2669 end if
2670 end do
2671 case (spin_polarized, spinors)
2672 do ist = 1, st%nst
2673 if (abs(st%occ(ist, ik)-m_one) < m_min_occ) then
2674 n_filled = n_filled + 1
2675 if (present(filled)) filled(n_filled) = ist
2676 else if (st%occ(ist, ik) > m_min_occ) then
2677 n_partially_filled = n_partially_filled + 1
2678 if (present(partially_filled)) partially_filled(n_partially_filled) = ist
2679 else if (abs(st%occ(ist, ik)) > m_min_occ) then
2680 write(message(1),*) 'Internal error in occupied_states: Illegal occupation value ', st%occ(ist, ik)
2681 call messages_fatal(1, namespace=namespace)
2682 end if
2683 end do
2684 end select
2685
2686 pop_sub(occupied_states)
2687 end subroutine occupied_states
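
! Illustrative sketch, not part of the Octopus sources: the classification performed by
! occupied_states for the unpolarized case, where a fully occupied level holds two electrons.
! The tolerance and the occupation numbers below are invented; the code above uses m_min_occ.
program demo_classify_occupations
  implicit none
  integer, parameter :: dp = kind(1.0d0)
  real(dp), parameter :: tol = 1.0e-8_dp
  real(dp) :: occ(6) = [2.0_dp, 2.0_dp, 1.0_dp, 0.3_dp, 0.0_dp, 0.0_dp]
  integer :: ist, n_filled, n_half_filled, n_partially_filled

  n_filled = 0
  n_half_filled = 0
  n_partially_filled = 0
  do ist = 1, size(occ)
    if (abs(occ(ist) - 2.0_dp) < tol) then
      n_filled = n_filled + 1
    else if (abs(occ(ist) - 1.0_dp) < tol) then
      n_half_filled = n_half_filled + 1
    else if (occ(ist) > tol) then
      n_partially_filled = n_partially_filled + 1
    end if
  end do
  print '(3(a,i2))', 'filled: ', n_filled, '   half-filled: ', n_half_filled, &
    '   partially filled: ', n_partially_filled
end program demo_classify_occupations
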
2688
2689
2691 subroutine kpoints_distribute(this, mc)
2692 type(states_elec_t), intent(inout) :: this
2693 type(multicomm_t), intent(in) :: mc
2694
2695 push_sub(kpoints_distribute)
2696 call distributed_init(this%d%kpt, this%nik, mc%group_comm(p_strategy_kpoints), "k-points")
2697
2698 pop_sub(kpoints_distribute)
2699 end subroutine kpoints_distribute
2700
2701
2702 ! TODO(Alex) Issue #824. Consider converting this to a function to returns `st_kpt_task`
2703 ! as this is only called in a couple of places, or package with the `st_kpt_mpi_grp` when split
2704 ! from st instance
2706 subroutine states_elec_kpoints_distribution(st)
2707 type(states_elec_t), intent(inout) :: st
2708
2710
2711 if (.not. allocated(st%st_kpt_task)) then
2712 safe_allocate(st%st_kpt_task(0:st%st_kpt_mpi_grp%size-1, 1:4))
2713 end if
2714
2715 st%st_kpt_task(0:st%st_kpt_mpi_grp%size-1, :) = 0
2716 st%st_kpt_task(st%st_kpt_mpi_grp%rank, :) = [st%st_start, st%st_end, st%d%kpt%start, st%d%kpt%end]
2717
2718 if (st%parallel_in_states .or. st%d%kpt%parallel) then
2719 call comm_allreduce(st%st_kpt_mpi_grp, st%st_kpt_task)
2720 end if
2721
2723 end subroutine states_elec_kpoints_distribution
2724
2725 ! ---------------------------------------------------------
2727 !
2728 subroutine states_elec_choose_kpoints(st, kpoints, namespace)
2729 type(states_elec_t), target, intent(inout) :: st
2730 type(kpoints_t), intent(in) :: kpoints
2731 type(namespace_t), intent(in) :: namespace
2732
2733 integer :: ik, iq
2734 type(states_elec_dim_t), pointer :: dd
2735
2736
2738
2739 dd => st%d
2740
2741 st%nik = kpoints_number(kpoints)
2742
2743 if (dd%ispin == spin_polarized) st%nik = 2*st%nik
2744
2745 safe_allocate(st%kweights(1:st%nik))
2746
2747 do iq = 1, st%nik
2748 ik = dd%get_kpoint_index(iq)
2749 st%kweights(iq) = kpoints%get_weight(ik)
2750 end do
2751
2752 if (debug%info) call print_kpoints_debug
2754
2755 contains
2756 subroutine print_kpoints_debug
2757 integer :: iunit
2758
2760
2761 call io_mkdir('debug/', namespace)
2762 iunit = io_open('debug/kpoints', namespace, action = 'write')
2763 call kpoints%write_info(iunit=iunit, absolute_coordinates = .true.)
2764 call io_close(iunit)
2765
2767 end subroutine print_kpoints_debug
2768
2769 end subroutine states_elec_choose_kpoints
2770
2775 !
2776 function states_elec_calculate_dipole(this, gr) result(dipole)
2777 class(states_elec_t), intent(in) :: this
2778 class(mesh_t), intent(in) :: gr
2779 real(real64) :: dipole(1:gr%box%dim)
2780
2781 integer :: ispin
2782 real(real64) :: e_dip(1:gr%box%dim, this%d%spin_channels)
2783
2785
2786 do ispin = 1, this%d%spin_channels
2787 call dmf_dipole(gr, this%rho(:, ispin), e_dip(:, ispin))
2788 end do
2789
2790 dipole(:) = sum(e_dip(:, 1:this%d%spin_channels), 2) ! dipole moment <mu_el> = \sum_i -e <x_i>
2791
2793 end function states_elec_calculate_dipole
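
! Illustrative sketch, not part of the Octopus sources: the electronic dipole moment referred to
! in the comment above, <mu_el> = -<x> per electron in atomic units, evaluated for an invented 1D
! density on a uniform grid; in the function above, dmf_dipole performs the corresponding mesh
! integral for each spin channel before the channels are summed.
program demo_dipole
  implicit none
  integer, parameter :: dp = kind(1.0d0), np = 2001
  real(dp) :: x(np), rho(np), dx, dip
  integer :: ip

  dx = 0.01_dp
  do ip = 1, np
    x(ip) = (ip - (np + 1)/2)*dx
    rho(ip) = exp(-(x(ip) - 0.5_dp)**2)          ! density centred at x = 0.5
  end do
  rho = rho/(sum(rho)*dx)                        ! normalize to one electron

  dip = -sum(x*rho)*dx                           ! electron charge is -1 in atomic units
  print '(a,f10.6)', 'dipole moment (about -0.5): ', dip
end program demo_dipole
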
2794
2795
2796#include "undef.F90"
2797#include "real.F90"
2798#include "states_elec_inc.F90"
2800#include "undef.F90"
2801#include "complex.F90"
2802#include "states_elec_inc.F90"
2803#include "undef.F90"
2804
2805end module states_elec_oct_m
2806
2807
2808!! Local Variables:
2809!! mode: f90
2810!! coding: utf-8
2811!! End: