Octopus
isdf.F90
!! Copyright (C) 2024 - 2025 A. Buccheri
!!
!! This program is free software; you can redistribute it and/or modify
!! it under the terms of the GNU General Public License as published by
!! the Free Software Foundation; either version 2, or (at your option)
!! any later version.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU General Public License for more details.
!!
!! You should have received a copy of the GNU General Public License
!! along with this program; if not, write to the Free Software
!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
!! 02110-1301, USA.

#include "global.h"

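!> @brief Interpolative separable density fitting (ISDF): molecular implementation.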
module isdf_oct_m
  use, intrinsic :: iso_fortran_env, only: real64
  use comm_oct_m
  use debug_oct_m
  use global_oct_m
  use io_oct_m
  use math_oct_m
  use mesh_oct_m
  use mpi_oct_m, only: mpi_world, mpi_double_precision, mpi_sum
  use space_oct_m
  use xc_cam_oct_m, only: xc_cam_t

  implicit none
  private
  public ::                        &
    isdf_ace_compute_potentials,   &
    isdf_interpolation_vectors,    &
    isdf_gram_matrix

  ! TODO(Alex) Issue #1195 Extend ISDF to spin-polarised systems
  integer, private, parameter :: ik = 1

contains

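  !> @brief ISDF wrapper: compute the interpolation points and vectors, then use them to apply
  !! the (adaptively-compressed) exchange operator to the KS states.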
  subroutine isdf_ace_compute_potentials(exxop, namespace, space, mesh, st, Vx_on_st, kpoints)
    type(exchange_operator_t), intent(in ) :: exxop !< Contains the ISDF interpolation points, and CAM parameters.
    type(namespace_t), intent(in ) :: namespace
    class(space_t), intent(in ) :: space
    class(mesh_t), intent(in ) :: mesh
    type(states_elec_t), intent(in ) :: st
    type(kpoints_t), intent(in ) :: kpoints

    type(states_elec_t), intent(out) :: Vx_on_st

    real(real64), allocatable :: psi_mu(:, :), P_r_mu(:, :), isdf_vectors(:, :)
    integer :: nstates


    ! TODO(Alex) Issue #1195 Extend ISDF to spin-polarised and periodic systems
    ASSERT(kpoints%gamma_only())
    ASSERT(.not. space%is_periodic())
    nstates = exxop%isdf%n_ks_states

    call isdf_interpolation_vectors(exxop%isdf, namespace, mesh, st, exxop%isdf%centroids, psi_mu, &
      p_r_mu, isdf_vectors)

    call disdf_ace_apply_exchange_op(exxop, namespace, mesh, st, psi_mu, p_r_mu, isdf_vectors, vx_on_st)
    SAFE_DEALLOCATE_A(psi_mu)
    SAFE_DEALLOCATE_A(p_r_mu)
    SAFE_DEALLOCATE_A(isdf_vectors)


  end subroutine isdf_ace_compute_potentials

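  !> @brief Top-level routine for computing ISDF vectors.
  !!
  !! A brief sketch of the ISDF factorisation implemented below (notation follows the variable
  !! names used in this routine). Pair products of KS states are approximated as
  !! \f[
  !!    \psi_i(\mathbf{r}) \psi_j(\mathbf{r}) \approx
  !!        \sum_\mu \zeta_\mu(\mathbf{r}) \psi_i(\mathbf{r}_\mu) \psi_j(\mathbf{r}_\mu),
  !! \f]
  !! where \f$ \{\mathbf{r}_\mu\} \f$ are the interpolation (centroid) points. The least-squares
  !! solution for the interpolation vectors is \f$ \zeta = [ZC^T][CC^T]^{-1} \f$, with
  !! \f$ [ZC^T]_{\mathbf{r}\mu} = P(\mathbf{r}, \mathbf{r}_\mu)^2 \f$ and
  !! \f$ [CC^T]_{\mu\nu} = P(\mathbf{r}_\mu, \mathbf{r}_\nu)^2 \f$, where
  !! \f$ P(\mathbf{r}, \mathbf{r}') = \sum_i \psi_i(\mathbf{r}) \psi_i(\mathbf{r}') \f$ is the
  !! quasi-density matrix built from the states entering the expansion.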
  subroutine isdf_interpolation_vectors(isdf, namespace, mesh, st, centroids, psi_mu, P_r_mu, isdf_vectors)
    type(isdf_options_t), intent(in ) :: isdf
    type(namespace_t), intent(in ) :: namespace
    class(mesh_t), intent(in ) :: mesh
    type(states_elec_t), intent(in ) :: st
    type(centroids_t), intent(in ) :: centroids

    real(real64), allocatable, intent(out) :: psi_mu(:, :)       !< States at the interpolation points: \f$ \psi_i(\mathbf{r}_\mu) \f$
    real(real64), allocatable, intent(out) :: P_r_mu(:, :)       !< \f$ P_{\mathbf{r},\mu} \f$, with size (np, n_int_g)
    real(real64), allocatable, intent(out) :: isdf_vectors(:, :) !< ISDF interpolation vectors, with size (np, n_int_g)

    character(len=6) :: np_char
    integer :: nocc, n_int_g, rank, i, j
    real(real64), allocatable :: zct(:, :)     ! Pair-product coefficient matrix \f$ ZC^T \f$
    real(real64), allocatable :: p_mu_nu(:, :) ! Quasi-density matrix with both variables defined at interpolation points
    real(real64), allocatable :: cct(:, :)     ! Coefficient product matrix \f$ CC^T \f$. Gets overwritten with its inverse

    PUSH_SUB_WITH_PROFILE(isdf_interpolation_vectors)

    ! TODO(Alex) Issue #1195 Extend ISDF to spin-polarised systems
    if (st%d%nspin > 1) then
      call messages_not_implemented("ISDF for SPIN_POLARIZED and SPINOR calculations", namespace)
    endif

    ! TODO(Alex) Issue #1196 Template ISDF to handle both real and complex states
    if (.not. states_are_real(st)) then
      call messages_not_implemented("ISDF handling of complex states", namespace)
    endif

    ! TODO(Alex) Issue #1276 Implement ISDF on GPU
    if (accel_is_enabled()) then
      call messages_not_implemented("ISDF on GPU", namespace)
    end if

    ! For debug file naming
    write(np_char, '(I6)') mpi_world%size

    ! Total number of interpolation points
    n_int_g = centroids%npoints_global()

    ! Max number of states used in the ISDF expansion
    nocc = isdf%n_ks_states

    if (st%st_start <= nocc .and. nocc <= st%st_end) then
      write(message(1),'(a, 1x, I3, 1x, a, 1x, I3)') "ISDF: Computing ISDF vectors up to state", &
        & nocc, " on process", st%mpi_grp%rank
      call messages_info(1, namespace=namespace, debug_only=.true.)
    endif

    ! psi_mu is allocated within the routine, as its shape varies w.r.t. PACKED or UNPACKED states
    call dphi_at_interpolation_points(mesh, st, centroids, nocc, psi_mu)
    if (debug%info) call output_psi_mu_for_all_states(namespace, st, nocc, psi_mu)

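    ! Quasi-density matrix with one coordinate on the full mesh and the other at the interpolation
    ! points: \f$ P(\mathbf{r}, \mathbf{r}_\mu) = \sum_i^{nocc} \psi_i(\mathbf{r}) \psi_i(\mathbf{r}_\mu) \f$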
    SAFE_ALLOCATE(p_r_mu(1:mesh%np, 1:n_int_g))
    call dquasi_density_matrix_at_mesh_centroid_points(st, nocc, psi_mu, p_r_mu)
    if (debug%info) call output_matrix(namespace, "p_r_mu_np"//trim(adjustl(np_char))//".txt", p_r_mu)

    SAFE_ALLOCATE(zct(1:mesh%np, 1:n_int_g))
    call pair_product_coefficient_matrix(p_r_mu, zct)
    if (debug%info) call output_matrix(namespace, "zct_np"//trim(adjustl(np_char))//".txt", zct)

    ! Note(Alex) It might be more efficient to mask p_r_mu -> p_mu_nu than to perform a GEMM and allreduce
    SAFE_ALLOCATE(p_mu_nu(1:n_int_g, 1:n_int_g))
    ! Contract over the state index, ist: P_mu_nu = [psi_ist_mu]^T @ psi_ist_nu
    if (local_number_of_states(st, nocc) > 0) then
      call lalg_gemm(psi_mu, psi_mu, p_mu_nu, transa='T')
    else
      ! Process st%mpi_grp%rank contains no states that contribute to the ISDF expansion,
      ! so avoid the GEMM and initialise to zero for the allreduce
      do j = 1, n_int_g
        do i = 1, n_int_g
          p_mu_nu(i, j) = 0.0_real64
        enddo
      enddo
    endif

    ! States may be distributed, so each element of p_mu_nu may contain only a partial contribution from the sum over {ist}
    call comm_allreduce(st%mpi_grp, p_mu_nu)
    if (debug%info) call output_matrix(namespace, "p_mu_nu_np"//trim(adjustl(np_char))//".txt", p_mu_nu)

    SAFE_ALLOCATE(cct(1:n_int_g, 1:n_int_g))
    call coefficient_product_matrix(p_mu_nu, cct)
    if (debug%info) then
      call output_matrix(namespace, "cct_np"//trim(adjustl(np_char))//".txt", cct)
      ASSERT(is_symmetric(cct))
    endif
    SAFE_DEALLOCATE_A(p_mu_nu)

    ! [CC^T]^{-1}, mutating cct in-place
    ! NOTE: CC^T is extremely ill-conditioned once a critical number of interpolation points
    ! is exceeded. Using the pseudo-inverse (SVD) circumvents this problem
    write(message(1),'(a)') "ISDF: Inverting [CC^T]"
    call messages_info(1, namespace=namespace, debug_only=.true.)
    call lalg_svd_inverse(n_int_g, n_int_g, cct)
    call symmetrize_matrix(n_int_g, cct)

    if (isdf%check_n_interp) then
      rank = lalg_matrix_rank_svd(cct, preserve_mat=.true.)
      write(message(1),'(a, I7)') "ISDF: Rank of CC^T is ", rank
      if (rank < n_int_g) then
        write(message(2),'(a)') " - This rank is the optimal ISDFNpoints with which to run the calculation"
      else
        write(message(2),'(a)') " - This suggests that ISDFNpoints is either optimal, or could be larger"
      endif
      call messages_info(2, namespace=namespace)
    endif

    ! zeta = [ZC^T][CC^T]^{-1}
    SAFE_ALLOCATE(isdf_vectors(1:mesh%np, 1:n_int_g))
    call lalg_gemm(mesh%np, n_int_g, n_int_g, 1.0_real64, zct, cct, 0.0_real64, isdf_vectors)

    ! ISDF vectors are distributed over the mesh, so do not output them in that case
    if (debug%info .and. .not. mesh%parallel_in_domains) then
      call output_matrix(namespace, "isdf_np"//trim(adjustl(np_char))//".txt", isdf_vectors)
    endif

    SAFE_DEALLOCATE_A(zct)
    SAFE_DEALLOCATE_A(cct)

    POP_SUB_WITH_PROFILE(isdf_interpolation_vectors)

  end subroutine isdf_interpolation_vectors

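  !> @brief Construct the pair-product coefficient matrix
  !! \f$ [ZC^T]_{\mathbf{r}\mu} = P^{\varphi}(\mathbf{r}, \mathbf{r}_\mu) P^{\psi}(\mathbf{r}, \mathbf{r}_\mu) \f$
  !! (element-wise product). If \f$ P^{\psi} \f$ is not supplied, \f$ P^{\psi} = P^{\varphi} \f$ is assumed.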
  subroutine pair_product_coefficient_matrix(p_phi, zct, p_psi)
    real(real64), target, contiguous, intent(in ) :: p_phi(:, :)           !< \f$ P^{\varphi}(\mathbf{r}, \mathbf{r}_\mu) \f$
    real(real64), target, optional, contiguous, intent(in ) :: p_psi(:, :) !< \f$ P^{\psi}(\mathbf{r}, \mathbf{r}_\mu) \f$

    real(real64), contiguous, intent(out) :: zct(:, :)

    integer :: np, n_int_g, ip, i_mu
    real(real64), pointer, contiguous :: p_2(:, :)

    PUSH_SUB_WITH_PROFILE(pair_product_coefficient_matrix)

    write(message(1),'(a)') "ISDF: Constructing Z C^T"
    call messages_info(1, debug_only=.true.)

    if (present(p_psi)) then
      p_2 => p_psi
    else
      p_2 => p_phi
    endif

    ! Quasi-density matrices require the same shape for element-wise multiplication
    ASSERT(all(shape(p_phi) == shape(p_2)))
    ! zct should be allocated, and its shape should be consistent with the quasi-density matrices
    ASSERT(all(shape(p_phi) == shape(zct)))

    np = size(p_phi, 1)
    n_int_g = size(p_phi, 2)

    ! Construct ZC^T as an element-wise product
    !$omp parallel
    do i_mu = 1, n_int_g
      !$omp do simd
      do ip = 1, np
        zct(ip, i_mu) = p_phi(ip, i_mu) * p_2(ip, i_mu)
      enddo
      !$omp end do simd nowait
    enddo
    !$omp end parallel
    nullify(p_2)

    POP_SUB_WITH_PROFILE(pair_product_coefficient_matrix)

  end subroutine pair_product_coefficient_matrix

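  !> @brief Construct the coefficient product matrix
  !! \f$ [CC^T]_{\mu\nu} = P^{\varphi}(\mathbf{r}_\mu, \mathbf{r}_\nu) P^{\psi}(\mathbf{r}_\mu, \mathbf{r}_\nu) \f$
  !! (element-wise product). If \f$ P^{\psi} \f$ is not supplied, \f$ P^{\psi} = P^{\varphi} \f$ is assumed.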
  subroutine coefficient_product_matrix(p_phi, cct, p_psi)
    real(real64), target, contiguous, intent(in ) :: p_phi(:, :)           !< \f$ P^{\varphi}(\mathbf{r}_\mu, \mathbf{r}_\nu) \f$
    real(real64), target, contiguous, optional, intent(in ) :: p_psi(:, :) !< \f$ P^{\psi}(\mathbf{r}_\mu, \mathbf{r}_\nu) \f$

    real(real64), contiguous, intent(out) :: cct(:, :) !< Array should be allocated by the caller

    integer :: n_int_g, i_mu, i_nu
    real(real64), contiguous, pointer :: p_2(:, :)

    PUSH_SUB_WITH_PROFILE(coefficient_product_matrix)

    write(message(1),'(a)') "ISDF: Constructing CC^T"
    call messages_info(1, debug_only=.true.)

    if (present(p_psi)) then
      p_2 => p_psi
    else
      p_2 => p_phi
    endif

    ! Quasi-density matrices require the same shape for element-wise multiplication
    ASSERT(all(shape(p_phi) == shape(p_2)))
    ! cct should be allocated, and its shape should be consistent with the quasi-density matrices
    ASSERT(all(shape(p_phi) == shape(cct)))
    n_int_g = size(p_phi, 1)

    ! Construct CC^T as an element-wise product
    !$omp parallel do collapse(2) default(shared)
    do i_nu = 1, n_int_g
      do i_mu = 1, n_int_g
        cct(i_mu, i_nu) = p_phi(i_mu, i_nu) * p_2(i_mu, i_nu)
      enddo
    enddo
    !$omp end parallel do
    nullify(p_2)

    POP_SUB_WITH_PROFILE(coefficient_product_matrix)

  end subroutine coefficient_product_matrix

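  !> @brief Compute the Gram matrix for the ISDF interpolation vectors,
  !! \f$ G_{ij} = \int \zeta_i(\mathbf{r}) \zeta_j(\mathbf{r}) \, d\mathbf{r} \f$,
  !! accumulated with the mesh volume element via dmf_dotp and reduced over mesh domains.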
  subroutine isdf_gram_matrix(mesh, isdf_vectors, gram_matrix)
    class(mesh_t), intent(in ) :: mesh
    real(real64), contiguous, intent(in ) :: isdf_vectors(:, :)
    real(real64), contiguous, intent(out) :: gram_matrix(:, :)

    integer :: n_int, i, j

    PUSH_SUB(isdf_gram_matrix)

    ASSERT(mesh%np == size(isdf_vectors, 1))
    n_int = size(isdf_vectors, 2)
    ASSERT(all(shape(gram_matrix) == [n_int, n_int]))

    ! It would be more efficient to use DGEMM, but dmf_dotp ensures the correct volume element

    ! Diagonal elements
    do i = 1, n_int
      gram_matrix(i, i) = dmf_dotp(mesh, isdf_vectors(:, i), isdf_vectors(:, i), reduce=.false.)
    enddo

    ! Upper-triangle elements
    do j = 2, n_int
      do i = 1, j - 1
        gram_matrix(i, j) = dmf_dotp(mesh, isdf_vectors(:, i), isdf_vectors(:, j), reduce=.false.)
        ! Lower triangle from symmetry
        gram_matrix(j, i) = gram_matrix(i, j)
      enddo
    enddo

    call mesh%allreduce(gram_matrix)

    POP_SUB(isdf_gram_matrix)

  end subroutine isdf_gram_matrix

#include "real.F90"
#include "isdf_inc.F90"
#include "undef.F90"

end module isdf_oct_m

!! Local Variables:
!! mode: f90
!! coding: utf-8
!! End: