type eigensolver_t
  private
  integer, public :: es_type    !< which eigensolver to use

  FLOAT,   public :: tolerance  !< convergence threshold for the residuals
  integer, public :: es_maxiter !< maximum number of solver iterations

  FLOAT :: imag_time            !< imaginary-time step

  !> Stores information about how well the solver performed:
  !! the residual of each state, indexed by state and k-point.
  FLOAT, allocatable, public :: diff(:, :)
  integer, public :: matvec     !< number of matrix-vector products performed
  !> Index of the highest consecutively converged state, per k-point.
  integer, allocatable, public :: converged(:)

  !> Stores information about the preconditioning.
  type(preconditioner_t), public :: pre

  type(subspace_t) :: sdiag     !< subspace diagonalization

  integer :: rmmdiis_minimization_iter

  logical :: skip_finite_weight_kpoints !< if true, skip k-points with nonzero weight
  logical, public :: folded_spectrum

  ! conjugate-gradient (cg) options
  logical, public :: orthogonalize_to_all
  integer, public :: conjugate_direction
  logical, public :: additional_terms
  FLOAT,   public :: energy_change_threshold

  type(exponential_t) :: exponential_operator !< operator exponential used by the imaginary-time evolution solver
end type eigensolver_t
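
! ---------------------------------------------------------
!> Runs the eigensolver for all k-points handled by this process. On exit,
!! eigens%diff, eigens%converged, and st%eigenval are up to date on every
!! process (also when k-points are distributed), and conv, if present, is
!! .true. when the first nstconv states are converged for all k-points.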
subroutine eigensolver_run(eigens, namespace, gr, st, hm, iter, conv, nstconv)
  type(eigensolver_t),      intent(inout) :: eigens
  type(namespace_t),        intent(in)    :: namespace
  type(grid_t),             intent(in)    :: gr
  type(states_elec_t),      intent(inout) :: st
  type(hamiltonian_elec_t), intent(inout) :: hm
  integer,                  intent(in)    :: iter
  logical, optional,        intent(out)   :: conv
  integer, optional,        intent(in)    :: nstconv !< Number of states considered for
                                                     !! the convergence criteria

  integer :: ik, ist, nstconv_
#ifdef HAVE_MPI
  logical :: conv_reduced
  integer :: outcount, lmatvec
  FLOAT,   allocatable :: ldiff(:), leigenval(:)
  integer, allocatable :: lconv(:)
#endif
  type(profile_t), save :: prof
  call profiling_in(prof, "EIGEN_SOLVER")
  PUSH_SUB(eigensolver_run)

  if (present(conv)) conv = .false.

  if (present(nstconv)) then
    nstconv_ = nstconv
  else
    nstconv_ = st%nst
  end if

  eigens%matvec = 0

  if (mpi_grp_is_root(mpi_world) .and. eigensolver_has_progress_bar(eigens) .and. .not. debug%info) then
    call loct_progress_bar(-1, st%lnst*st%d%kpt%nlocal)
  end if
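
  ! Solve each k-point handled by this process; the real and complex versions
  ! of the solver are dispatched on the type of the states.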
  ik_loop: do ik = st%d%kpt%start, st%d%kpt%end
    if (eigens%skip_finite_weight_kpoints .and. st%d%kweights(ik) > M_ZERO) cycle

    if (states_are_real(st)) then
      call deigensolver_run(eigens, namespace, gr%mesh, st, hm, iter, ik)
    else
      call zeigensolver_run(eigens, namespace, gr%mesh, st, hm, iter, ik)
    end if

    if (.not. eigens%folded_spectrum) then
      ! recheck convergence after subspace diagonalization, since states may have reordered
      eigens%converged(ik) = 0
      do ist = 1, st%nst
        if (eigens%diff(ist, ik) < eigens%tolerance) then
          eigens%converged(ik) = ist
        else
          exit
        end if
      end do
    end if
  end do ik_loop
  if (mpi_grp_is_root(mpi_world) .and. eigensolver_has_progress_bar(eigens) .and. .not. debug%info) then
    write(stdout, '(1x)')
  end if

  if (present(conv)) conv = all(eigens%converged(st%d%kpt%start:st%d%kpt%end) >= nstconv_)
#ifdef HAVE_MPI
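  ! With k-point parallelization, each process has only solved its local
  ! k-points: combine the convergence flag, the matrix-vector count, and the
  ! per-k-point converged counts, residuals, and eigenvalues across the group.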
  if (st%d%kpt%parallel) then
    if (present(conv)) then
      call st%d%kpt%mpi_grp%allreduce(conv, conv_reduced, 1, MPI_LOGICAL, MPI_LAND)
      conv = conv_reduced
    end if

    lmatvec = eigens%matvec
    call st%d%kpt%mpi_grp%allreduce(lmatvec, eigens%matvec, 1, MPI_INTEGER, MPI_SUM)
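
    ! gather the converged counts of all k-points onto every process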
    SAFE_ALLOCATE(lconv(1:st%d%kpt%nlocal))
    lconv(1:st%d%kpt%nlocal) = eigens%converged(st%d%kpt%start:st%d%kpt%end)
    call lmpi_gen_allgatherv(st%d%kpt%nlocal, lconv, outcount, eigens%converged, st%d%kpt%mpi_grp)
    ASSERT(outcount == st%d%nik)
    SAFE_DEALLOCATE_A(lconv)

    ! every node needs to know all eigenvalues (and diff)
    SAFE_ALLOCATE(ldiff(1:st%d%kpt%nlocal))
    SAFE_ALLOCATE(leigenval(1:st%d%kpt%nlocal))
    do ist = st%st_start, st%st_end
      ldiff(1:st%d%kpt%nlocal) = eigens%diff(ist, st%d%kpt%start:st%d%kpt%end)
      leigenval(1:st%d%kpt%nlocal) = st%eigenval(ist, st%d%kpt%start:st%d%kpt%end)
      call lmpi_gen_allgatherv(st%d%kpt%nlocal, ldiff, outcount, &
        eigens%diff(ist, :), st%d%kpt%mpi_grp)
      ASSERT(outcount == st%d%nik)
      call lmpi_gen_allgatherv(st%d%kpt%nlocal, leigenval, outcount, &
        st%eigenval(ist, :), st%d%kpt%mpi_grp)
      ASSERT(outcount == st%d%nik)
    end do
    SAFE_DEALLOCATE_A(ldiff)
    SAFE_DEALLOCATE_A(leigenval)
  end if
#endif
  POP_SUB(eigensolver_run)
  call profiling_out(prof)
end subroutine eigensolver_run