Octopus
accel.F90
Go to the documentation of this file.
1!! Copyright (C) 2010-2016 X. Andrade
2!!
3!! This program is free software; you can redistribute it and/or modify
4!! it under the terms of the GNU General Public License as published by
5!! the Free Software Foundation; either version 2, or (at your option)
6!! any later version.
7!!
8!! This program is distributed in the hope that it will be useful,
9!! but WITHOUT ANY WARRANTY; without even the implied warranty of
10!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11!! GNU General Public License for more details.
12!!
13!! You should have received a copy of the GNU General Public License
14!! along with this program; if not, write to the Free Software
15!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16!! 02110-1301, USA.
17!!
18
19#include "global.h"
20
21#if defined(HAVE_OPENCL) && defined(HAVE_CUDA)
22#error "Cannot compile with OpenCL and Cuda support at the same time"
23#endif
24
25#if defined(HAVE_OPENCL) || defined(HAVE_CUDA)
26#define HAVE_ACCEL 1
27#endif
28
29module accel_oct_m
31#ifdef HAVE_OPENCL
32 use cl
33#endif
34#if defined(HAVE_CLBLAS) || defined(HAVE_CLBLAST)
35 use clblas_oct_m
36#endif
37 use cuda_oct_m
38#ifdef HAVE_CLFFT
39 use clfft
40#endif
41 use debug_oct_m
42 use global_oct_m
43 use iso_c_binding, only: c_size_t
44 use, intrinsic :: iso_fortran_env
45 use loct_oct_m
46 use math_oct_m
48 use mpi_oct_m
50 use types_oct_m
51 use parser_oct_m
55 use string_oct_m
56
57 implicit none
58
59 private
60
61 public :: &
66 accel_t, &
69 accel_init, &
70 accel_end, &
102
103#ifdef HAVE_OPENCL
104 integer, public, parameter :: &
105 ACCEL_MEM_READ_ONLY = cl_mem_read_only, &
106 accel_mem_read_write = cl_mem_read_write, &
107 accel_mem_write_only = cl_mem_write_only
108#else
109 integer, public, parameter :: &
110 ACCEL_MEM_READ_ONLY = 0, &
113#endif
114
115 type accel_context_t
116 ! Components are public by default
117#ifdef HAVE_OPENCL
118 type(cl_context) :: cl_context
119#elif defined(HAVE_CUDA)
120 type(c_ptr) :: cuda_context
121#else
122 integer :: dummy
123#endif
124 end type accel_context_t
125
126 type accel_device_t
127 ! Components are public by default
128#ifdef HAVE_OPENCL
129 type(cl_device_id) :: cl_device
130#elif defined(HAVE_CUDA)
131 type(c_ptr) :: cuda_device
132#else
133 integer :: dummy
134#endif
135 end type accel_device_t
136
137 type accel_t
138 ! Components are public by default
139 type(accel_context_t) :: context
140 type(accel_device_t) :: device
141#ifdef HAVE_OPENCL
142 type(cl_command_queue) :: command_queue
143#endif
144 type(c_ptr) :: cublas_handle
145 type(c_ptr) :: cuda_stream
146 type(c_ptr) :: module_map
147 integer :: max_workgroup_size
148 integer(int64) :: local_memory_size
149 integer(int64) :: global_memory_size
150 logical :: enabled
151 logical :: allow_CPU_only
152 logical :: shared_mem
153 logical :: cuda_mpi
154 integer :: warp_size
155 integer(int64) :: initialize_buffers
156 character(len=32) :: debug_flag
157 integer(int64) :: max_block_dim(3)
158 integer(int64) :: max_grid_dim(3)
159 end type accel_t
160
161 type accel_mem_t
162 ! Components are public by default
163#ifdef HAVE_OPENCL
164 type(cl_mem) :: mem
165#else
166 type(c_ptr) :: mem
167#endif
168 integer(c_size_t) :: size = 0
169 type(type_t) :: type
170 integer :: flags = 0
171 logical :: allocated = .false.
172 end type accel_mem_t
173
174 type accel_kernel_t
175 ! Components are public by default
176#ifdef HAVE_OPENCL
177 type(cl_kernel) :: kernel
178#endif
179#ifdef HAVE_CUDA
180 type(c_ptr) :: cuda_kernel
181 type(c_ptr) :: cuda_module
182 type(c_ptr) :: arguments
183#endif
184 integer(int64) :: cuda_shared_mem
185 logical :: initialized = .false.
186 type(accel_kernel_t), pointer :: next
187 integer :: arg_count
188 character(len=128) :: kernel_name
189 end type accel_kernel_t
190
191 type(accel_t), public :: accel
192
193 ! Global variables defined on device
194 type(accel_mem_t), public, save :: zM_0_buffer, zM_1_buffer
195 type(accel_mem_t), public, save :: dM_0_buffer, dM_1_buffer
197 ! the kernels
198 type(accel_kernel_t), public, target, save :: kernel_vpsi
199 type(accel_kernel_t), public, target, save :: kernel_vpsi_complex
200 type(accel_kernel_t), public, target, save :: kernel_vpsi_spinors
201 type(accel_kernel_t), public, target, save :: kernel_vpsi_spinors_complex
202 type(accel_kernel_t), public, target, save :: kernel_daxpy
203 type(accel_kernel_t), public, target, save :: kernel_zaxpy
204 type(accel_kernel_t), public, target, save :: kernel_copy
205 type(accel_kernel_t), public, target, save :: kernel_copy_complex_to_real
206 type(accel_kernel_t), public, target, save :: kernel_copy_real_to_complex
207 type(accel_kernel_t), public, target, save :: dpack
208 type(accel_kernel_t), public, target, save :: zpack
209 type(accel_kernel_t), public, target, save :: dunpack
210 type(accel_kernel_t), public, target, save :: zunpack
211 type(accel_kernel_t), public, target, save :: kernel_ghost_reorder
212 type(accel_kernel_t), public, target, save :: kernel_density_real
213 type(accel_kernel_t), public, target, save :: kernel_density_complex
214 type(accel_kernel_t), public, target, save :: kernel_density_spinors
215 type(accel_kernel_t), public, target, save :: kernel_phase
216 type(accel_kernel_t), public, target, save :: kernel_phase_spiral
217 type(accel_kernel_t), public, target, save :: dkernel_dot_matrix
218 type(accel_kernel_t), public, target, save :: zkernel_dot_matrix
219 type(accel_kernel_t), public, target, save :: zkernel_dot_matrix_spinors
220 type(accel_kernel_t), public, target, save :: dkernel_batch_axpy
221 type(accel_kernel_t), public, target, save :: zkernel_batch_axpy
222 type(accel_kernel_t), public, target, save :: dkernel_ax_function_py
223 type(accel_kernel_t), public, target, save :: zkernel_ax_function_py
224 type(accel_kernel_t), public, target, save :: dkernel_batch_dotp
225 type(accel_kernel_t), public, target, save :: zkernel_batch_dotp
226 type(accel_kernel_t), public, target, save :: dzmul
227 type(accel_kernel_t), public, target, save :: zzmul
228
229 interface accel_padded_size
231 end interface accel_padded_size
239 end interface accel_kernel_run
259 end interface accel_write_buffer
260
270 end interface accel_read_buffer
272 interface accel_set_kernel_arg
273 module procedure &
298 module procedure &
306 module procedure &
312
314 integer, parameter :: &
315 OPENCL_GPU = -1, &
316 opencl_cpu = -2, &
318 opencl_default = -4
319
320
321 integer, parameter :: &
322 CL_PLAT_INVALID = -1, &
323 cl_plat_amd = -2, &
324 cl_plat_nvidia = -3, &
326 cl_plat_intel = -5
327
328 ! a "convenience" public variable
329 integer, public :: cl_status
330
331 integer :: buffer_alloc_count
332 integer(int64) :: allocated_mem
333 type(accel_kernel_t), pointer :: head
334 type(alloc_cache_t) :: memcache
335
336contains
337
338 pure logical function accel_is_enabled() result(enabled)
339#ifdef HAVE_ACCEL
340 enabled = accel%enabled
341#else
342 enabled = .false.
343#endif
344 end function accel_is_enabled
345
346 ! ------------------------------------------
347
348 pure logical function accel_allow_cpu_only() result(allow)
349#ifdef HAVE_ACCEL
350 allow = accel%allow_CPU_only
351#else
352 allow = .true.
353#endif
354 end function accel_allow_cpu_only
355
356 ! ------------------------------------------
357
358 subroutine accel_init(base_grp, namespace)
359 type(mpi_grp_t), intent(inout) :: base_grp
360 type(namespace_t), intent(in) :: namespace
361
362 logical :: disable, default, run_benchmark
363 integer :: idevice, iplatform
364#ifdef HAVE_OPENCL
365 integer :: device_type
366 integer :: cl_status, idev
367 integer :: ndevices, ret_devices, nplatforms, iplat
368 character(len=256) :: device_name
369 type(cl_platform_id) :: platform_id
370 type(cl_program) :: prog
371 type(cl_platform_id), allocatable :: allplatforms(:)
372 type(cl_device_id), allocatable :: alldevices(:)
373 integer :: max_work_item_dimensions
374 integer(int64), allocatable :: max_work_item_sizes(:)
375#endif
376#ifdef HAVE_CUDA
377 integer :: dim
378#ifdef HAVE_MPI
379 character(len=256) :: sys_name
380#endif
381#endif
382
383 push_sub(accel_init)
384
385 buffer_alloc_count = 0
386
387 !%Variable DisableAccel
388 !%Type logical
389 !%Default yes
390 !%Section Execution::Accel
391 !%Description
392 !% If Octopus was compiled with OpenCL or CUDA support, it will
393 !% try to initialize and use an accelerator device. By setting this
394 !% variable to <tt>yes</tt> you force Octopus not to use an accelerator even it is available.
395 !%End
396 call messages_obsolete_variable(namespace, 'DisableOpenCL', 'DisableAccel')
397#ifdef HAVE_ACCEL
398 default = .false.
399#else
400 default = .true.
401#endif
402 call parse_variable(namespace, 'DisableAccel', default, disable)
403 accel%enabled = .not. disable
404
405#ifndef HAVE_ACCEL
406 if (accel%enabled) then
407 message(1) = 'Octopus was compiled without OpenCL or Cuda support.'
408 call messages_fatal(1)
409 end if
410#endif
412 if (.not. accel_is_enabled()) then
413 pop_sub(accel_init)
414 return
415 end if
416
417 !%Variable AccelPlatform
418 !%Type integer
419 !%Default 0
420 !%Section Execution::Accel
421 !%Description
422 !% This variable selects the OpenCL platform that Octopus will
423 !% use. You can give an explicit platform number or use one of
424 !% the options that select a particular vendor
425 !% implementation. Platform 0 is used by default.
426 !%
427 !% This variable has no effect for CUDA.
428 !%Option amd -2
429 !% Use the AMD OpenCL platform.
430 !%Option nvidia -3
431 !% Use the Nvidia OpenCL platform.
432 !%Option ati -4
433 !% Use the ATI (old AMD) OpenCL platform.
434 !%Option intel -5
435 !% Use the Intel OpenCL platform.
436 !%End
437 call parse_variable(namespace, 'AccelPlatform', 0, iplatform)
439 call messages_obsolete_variable(namespace, 'OpenCLPlatform', 'AccelPlatform')
440
441 !%Variable AccelDevice
442 !%Type integer
443 !%Default gpu
444 !%Section Execution::Accel
445 !%Description
446 !% This variable selects the OpenCL or CUDA accelerator device
447 !% that Octopus will use. You can specify one of the options below
448 !% or a numerical id to select a specific device.
449 !%
450 !% Values >= 0 select the device to be used. In case of MPI enabled runs
451 !% devices are distributed in a round robin fashion, starting at this value.
452 !%Option gpu -1
453 !% If available, Octopus will use a GPU.
454 !%Option cpu -2
455 !% If available, Octopus will use a CPU (only for OpenCL).
456 !%Option accelerator -3
457 !% If available, Octopus will use an accelerator (only for OpenCL).
458 !%Option accel_default -4
459 !% Octopus will use the default device specified by the implementation.
460 !% implementation.
461 !%End
462 call parse_variable(namespace, 'AccelDevice', opencl_gpu, idevice)
463
464 call messages_obsolete_variable(namespace, 'OpenCLDevice', 'AccelDevice')
465
466 if (idevice < opencl_default) then
467 call messages_write('Invalid AccelDevice')
468 call messages_fatal()
469 end if
470
471 call messages_print_with_emphasis(msg="GPU acceleration", namespace=namespace)
472
473#ifdef HAVE_CUDA
474 if (idevice<0) idevice = 0
475 call cuda_init(accel%context%cuda_context, accel%device%cuda_device, accel%cuda_stream, &
476 idevice, base_grp%rank)
477#ifdef HAVE_MPI
478 call loct_sysname(sys_name)
479 write(message(1), '(A,I5,A,I5,2A)') "Rank ", base_grp%rank, " uses device number ", idevice, &
480 " on ", trim(sys_name)
481 call messages_info(1, all_nodes = .true.)
482#endif
483
484 ! no shared mem support in our cuda interface (for the moment)
485 accel%shared_mem = .true.
486
487 call cublas_init(accel%cublas_handle, accel%cuda_stream)
488#endif
489
490#ifdef HAVE_OPENCL
491 call profiling_in('CL_INIT')
492
493 call clgetplatformids(nplatforms, cl_status)
494 if (cl_status /= cl_success) call opencl_print_error(cl_status, "GetPlatformIDs")
495
496 safe_allocate(allplatforms(1:nplatforms))
497
498 call clgetplatformids(allplatforms, iplat, cl_status)
499 if (cl_status /= cl_success) call opencl_print_error(cl_status, "GetPlatformIDs")
500
501 call messages_write('Info: Available CL platforms: ')
502 call messages_write(nplatforms)
503 call messages_info()
504
505 do iplat = 1, nplatforms
506
507 call clgetplatforminfo(allplatforms(iplat), cl_platform_name, device_name, cl_status)
508
509 if (iplatform < 0) then
510 if (iplatform == get_platform_id(device_name)) iplatform = iplat - 1
511 end if
512
513 if (iplatform == iplat - 1) then
514 call messages_write(' * Platform ')
515 else
516 call messages_write(' Platform ')
517 end if
518
519 call messages_write(iplat - 1)
520 call messages_write(' : '//device_name)
521 call clgetplatforminfo(allplatforms(iplat), cl_platform_version, device_name, cl_status)
522 call messages_write(' ('//trim(device_name)//')')
523 call messages_info()
524 end do
525
526 call messages_info()
527
528 if (iplatform >= nplatforms .or. iplatform < 0) then
529 call messages_write('Requested CL platform does not exist')
530 if (iplatform > 0) then
531 call messages_write('(platform = ')
532 call messages_write(iplatform)
533 call messages_write(').')
534 end if
535 call messages_fatal()
536 end if
537
538 platform_id = allplatforms(iplatform + 1)
539
540 safe_deallocate_a(allplatforms)
541
542 call clgetdeviceids(platform_id, cl_device_type_all, ndevices, cl_status)
543
544 call messages_write('Info: Available CL devices: ')
545 call messages_write(ndevices)
546 call messages_info()
547
548 safe_allocate(alldevices(1:ndevices))
549
550 ! list all devices
551
552 call clgetdeviceids(platform_id, cl_device_type_all, alldevices, ret_devices, cl_status)
553
554 do idev = 1, ndevices
555 call messages_write(' Device ')
556 call messages_write(idev - 1)
557 call clgetdeviceinfo(alldevices(idev), cl_device_name, device_name, cl_status)
558 call messages_write(' : '//device_name)
559 call messages_info()
560 end do
561
562 select case (idevice)
563 case (opencl_gpu)
564 device_type = cl_device_type_gpu
565 case (opencl_cpu)
566 device_type = cl_device_type_cpu
567 case (opencl_accelerator)
568 device_type = cl_device_type_accelerator
569 case (opencl_default)
570 device_type = cl_device_type_default
571 case default
572 device_type = cl_device_type_all
573 end select
574
575 ! now get a list of the selected type
576 call clgetdeviceids(platform_id, device_type, alldevices, ret_devices, cl_status)
577
578 if (ret_devices < 1) then
579 ! we didnt find a device of the selected type, we ask for the default device
580 call clgetdeviceids(platform_id, cl_device_type_default, alldevices, ret_devices, cl_status)
581
582 if (ret_devices < 1) then
583 ! if this does not work, we ask for all devices
584 call clgetdeviceids(platform_id, cl_device_type_all, alldevices, ret_devices, cl_status)
585 end if
586
587 if (ret_devices < 1) then
588 call messages_write('Cannot find an OpenCL device')
589 call messages_fatal()
590 end if
591 end if
592
593 ! the number of devices can be smaller
594 ndevices = ret_devices
595
596 if (idevice < 0) then
597 if (base_grp%size > 1) then
598 ! with MPI we have to select the device so multiple GPUs in one
599 ! node are correctly distributed
600 call select_device(idevice)
601 else
602 idevice = 0
603 end if
604 end if
605
606 if (idevice >= ndevices) then
607 call messages_write('Requested CL device does not exist (device = ')
608 call messages_write(idevice)
609 call messages_write(', platform = ')
610 call messages_write(iplatform)
611 call messages_write(').')
612 call messages_fatal()
613 end if
614
615 accel%device%cl_device = alldevices(idevice + 1)
616
617 ! create the context
618 accel%context%cl_context = clcreatecontext(platform_id, accel%device%cl_device, cl_status)
619 if (cl_status /= cl_success) call opencl_print_error(cl_status, "CreateContext")
620
621 safe_deallocate_a(alldevices)
622
623 accel%command_queue = clcreatecommandqueue(accel%context%cl_context, accel%device%cl_device, &
624 cl_queue_profiling_enable, cl_status)
625 if (cl_status /= cl_success) call opencl_print_error(cl_status, "CreateCommandQueue")
626
627 call clgetdeviceinfo(accel%device%cl_device, cl_device_type, device_type, cl_status)
628
629 select case (device_type)
630 case (cl_device_type_gpu)
631 accel%shared_mem = .true.
632 case (cl_device_type_cpu, cl_device_type_accelerator)
633 accel%shared_mem = .false.
634 case default
635 accel%shared_mem = .false.
636 end select
637
638#ifdef HAVE_CLBLAS
639 call clblassetup(cl_status)
640 if (cl_status /= clblassuccess) call clblas_print_error(cl_status, 'clblasSetup')
641#endif
642
643#ifdef HAVE_CLFFT
644 call clfftsetup(cl_status)
645 if (cl_status /= clfft_success) call clfft_print_error(cl_status, 'clfftSetup')
646#endif
647
648 call profiling_out('CL_INIT')
649#endif
650
651 ! Get some device information that we will need later
652
653 ! total memory
654#ifdef HAVE_OPENCL
655 call clgetdeviceinfo(accel%device%cl_device, cl_device_global_mem_size, accel%global_memory_size, cl_status)
656 call clgetdeviceinfo(accel%device%cl_device, cl_device_local_mem_size, accel%local_memory_size, cl_status)
657 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_work_group_size, accel%max_workgroup_size, cl_status)
658 accel%warp_size = 1
659 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_work_item_dimensions, max_work_item_dimensions, cl_status)
660 if (max_work_item_dimensions < 3) then
661 message(1) = "Octopus requires a device where CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS is at least 3."
662 call messages_fatal(1, only_root_writes = .true., namespace=namespace)
663 end if
664 safe_allocate(max_work_item_sizes(1:max_work_item_dimensions))
665 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_work_item_sizes, max_work_item_sizes(1), cl_status)
666 accel%max_block_dim(:) = max_work_item_sizes(1:3)
667 safe_deallocate_a(max_work_item_sizes)
668 ! In principle OpenCL does not set any limits on the global_work_size. It is
669 ! only limited by the available resources. Therefore we use the default
670 ! values for NVIDIA GPUs starting at CC 5.0. No idea whether these will work
671 ! generically.
672 ! https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features
673 ! -and-technical-specifications-technical-specifications-per-compute-capability
674 accel%max_grid_dim(1) = (2_int64)**31 - 1_int64
675 accel%max_grid_dim(2) = 65536_int64
676 accel%max_grid_dim(3) = 65536_int64
677#endif
678#ifdef HAVE_CUDA
679 call cuda_device_total_memory(accel%device%cuda_device, accel%global_memory_size)
680 call cuda_device_shared_memory(accel%device%cuda_device, accel%local_memory_size)
681 call cuda_device_max_threads_per_block(accel%device%cuda_device, accel%max_workgroup_size)
682 call cuda_device_get_warpsize(accel%device%cuda_device, accel%warp_size)
683 call cuda_device_max_block_dim_x(accel%device%cuda_device, dim)
684 accel%max_block_dim(1) = int(dim, int64)
685 call cuda_device_max_block_dim_y(accel%device%cuda_device, dim)
686 accel%max_block_dim(2) = int(dim, int64)
687 call cuda_device_max_block_dim_z(accel%device%cuda_device, dim)
688 accel%max_block_dim(3) = int(dim, int64)
689 call cuda_device_max_grid_dim_x(accel%device%cuda_device, dim)
690 accel%max_grid_dim(1) = int(dim, int64)
691 call cuda_device_max_grid_dim_y(accel%device%cuda_device, dim)
692 accel%max_grid_dim(2) = int(dim, int64)
693 call cuda_device_max_grid_dim_z(accel%device%cuda_device, dim)
694 accel%max_grid_dim(3) = int(dim, int64)
695#endif
696
697 if (base_grp%is_root()) call device_info()
698
699 ! initialize the cache used to speed up allocations
700 call alloc_cache_init(memcache, nint(0.25_real64*accel%global_memory_size, int64))
701
702 ! now initialize the kernels
704
705#if defined(HAVE_HIP)
706 accel%debug_flag = "-g"
707#elif defined(HAVE_CUDA)
708 accel%debug_flag = "-lineinfo"
709#elif defined(HAVE_OPENCL)
710 accel%debug_flag = "-g"
711#endif
712
713 call accel_kernel_start_call(kernel_vpsi, 'vpsi.cl', "vpsi")
714 call accel_kernel_start_call(kernel_vpsi_complex, 'vpsi.cl', "vpsi_complex")
715 call accel_kernel_start_call(kernel_vpsi_spinors, 'vpsi.cl', "vpsi_spinors")
716 call accel_kernel_start_call(kernel_vpsi_spinors_complex, 'vpsi.cl', "vpsi_spinors_complex")
717 call accel_kernel_start_call(kernel_daxpy, 'axpy.cl', "daxpy", flags = '-DRTYPE_DOUBLE')
718 call accel_kernel_start_call(kernel_zaxpy, 'axpy.cl', "zaxpy", flags = '-DRTYPE_COMPLEX')
719 call accel_kernel_start_call(dkernel_batch_axpy, 'axpy.cl', "dbatch_axpy_function", &
720 flags = ' -DRTYPE_DOUBLE')
721 call accel_kernel_start_call(zkernel_batch_axpy, 'axpy.cl', "zbatch_axpy_function", &
722 flags = '-DRTYPE_COMPLEX')
723 call accel_kernel_start_call(dkernel_ax_function_py, 'axpy.cl', "dbatch_ax_function_py", &
724 flags = '-DRTYPE_DOUBLE')
725 call accel_kernel_start_call(zkernel_ax_function_py, 'axpy.cl', "zbatch_ax_function_py", &
726 flags = '-DRTYPE_COMPLEX')
727 call accel_kernel_start_call(dkernel_batch_dotp, 'mesh_batch_single.cl', "dbatch_mf_dotp")
728 call accel_kernel_start_call(zkernel_batch_dotp, 'mesh_batch_single.cl', "zbatch_mf_dotp")
729 call accel_kernel_start_call(dpack, 'pack.cl', "dpack")
730 call accel_kernel_start_call(zpack, 'pack.cl', "zpack")
731 call accel_kernel_start_call(dunpack, 'pack.cl', "dunpack")
732 call accel_kernel_start_call(zunpack, 'pack.cl', "zunpack")
733 call accel_kernel_start_call(kernel_copy, 'copy.cl', "copy")
734 call accel_kernel_start_call(kernel_copy_complex_to_real, 'copy.cl', "copy_complex_to_real")
735 call accel_kernel_start_call(kernel_copy_real_to_complex, 'copy.cl', "copy_real_to_complex")
736 call accel_kernel_start_call(kernel_ghost_reorder, 'ghost.cl', "ghost_reorder")
737 call accel_kernel_start_call(kernel_density_real, 'density.cl', "density_real")
738 call accel_kernel_start_call(kernel_density_complex, 'density.cl', "density_complex")
739 call accel_kernel_start_call(kernel_density_spinors, 'density.cl', "density_spinors")
740 call accel_kernel_start_call(kernel_phase, 'phase.cl', "phase")
741 call accel_kernel_start_call(dkernel_dot_matrix, 'mesh_batch.cl', "ddot_matrix")
742 call accel_kernel_start_call(zkernel_dot_matrix, 'mesh_batch.cl', "zdot_matrix")
743 call accel_kernel_start_call(zkernel_dot_matrix_spinors, 'mesh_batch.cl', "zdot_matrix_spinors")
744
746 call accel_kernel_start_call(dzmul, 'mul.cl', "dzmul", flags = '-DRTYPE_DOUBLE')
747 call accel_kernel_start_call(zzmul, 'mul.cl', "zzmul", flags = '-DRTYPE_COMPLEX')
748
749 ! Define global buffers
750 if(.not. accel_buffer_is_allocated(zm_0_buffer)) then
751 call accel_create_buffer(zm_0_buffer, accel_mem_read_only, type_cmplx, 1)
752 call accel_write_buffer(zm_0_buffer, m_z0)
753 end if
754 if(.not. accel_buffer_is_allocated(zm_1_buffer)) then
755 call accel_create_buffer(zm_1_buffer, accel_mem_read_only, type_cmplx, 1)
756 call accel_write_buffer(zm_1_buffer, m_z1)
757 end if
758 if(.not. accel_buffer_is_allocated(dm_0_buffer)) then
759 call accel_create_buffer(dm_0_buffer, accel_mem_read_only, type_float, 1)
760 call accel_write_buffer(dm_0_buffer, m_zero)
761 end if
762 if(.not. accel_buffer_is_allocated(dm_1_buffer)) then
763 call accel_create_buffer(dm_1_buffer, accel_mem_read_only, type_float, 1)
764 call accel_write_buffer(dm_1_buffer, m_one)
765 end if
766
767
768 !%Variable AccelBenchmark
769 !%Type logical
770 !%Default no
771 !%Section Execution::Accel
772 !%Description
773 !% If this variable is set to yes, Octopus will run some
774 !% routines to benchmark the performance of the accelerator device.
775 !%End
776 call parse_variable(namespace, 'AccelBenchmark', .false., run_benchmark)
777
778 call messages_obsolete_variable(namespace, 'OpenCLBenchmark', 'AccelBenchmark')
779
780 if (run_benchmark) then
782 end if
783
784 !%Variable GPUAwareMPI
785 !%Type logical
786 !%Section Execution::Accel
787 !%Description
788 !% If Octopus was compiled with GPU support and MPI support and if the MPI
789 !% implementation is GPU-aware (i.e., it supports communication using device pointers),
790 !% this switch can be set to true to use the GPU-aware MPI features. The advantage
791 !% of this approach is that it can do, e.g., peer-to-peer copies between devices without
792 !% going through the host memory.
793 !% The default is false, except when the configure switch --enable-cudampi is set, in which
794 !% case this variable is set to true.
795 !%End
796#ifdef HAVE_CUDA_MPI
797 default = .true.
798#else
799 default = .false.
800#endif
801 call parse_variable(namespace, 'GPUAwareMPI', default, accel%cuda_mpi)
802 if (accel%cuda_mpi) then
803#ifndef HAVE_CUDA_MPI
804 call messages_write("Warning: trying to use GPU-aware MPI, but we have not detected support in the linked MPI library.")
805 call messages_warning()
806#endif
807 call messages_write("Using GPU-aware MPI.")
808 call messages_info()
809 end if
810
811
812 !%Variable AllowCPUonly
813 !%Type logical
814 !%Section Execution::Accel
815 !%Description
816 !% In order to prevent waste of resources, the code will normally stop when the GPU is disabled due to
817 !% incomplete implementations or incompatibilities. AllowCPUonly = yes overrides this and allows the
818 !% code execution also in these cases.
819 !%End
820#if defined (HAVE_ACCEL)
821 default = .false.
822#else
823 default = .true.
824#endif
825 call parse_variable(namespace, 'AllowCPUonly', default, accel%allow_CPU_only)
826
827
828 !%Variable InitializeGPUBuffers
829 !%Type integer
830 !%Default no
831 !%Section Execution::Accel
832 !%Description
833 !% Initialize new GPU buffers to zero on creation (use only for debugging, as it has a performance impact!).
834 !%Option no 0
835 !% Do not initialize GPU buffers.
836 !%Option yes 1
837 !% Initialize GPU buffers to zero.
838 !%Option nan 2
839 !% Initialize GPU buffers to nan.
840 !%End
841 call parse_variable(namespace, 'InitializeGPUBuffers', option__initializegpubuffers__no, accel%initialize_buffers)
842 if (.not. varinfo_valid_option('InitializeGPUBuffers', accel%initialize_buffers)) then
843 call messages_input_error(namespace, 'InitializeGPUBuffers')
844 end if
845
846
847 call messages_print_with_emphasis(namespace=namespace)
848
849 pop_sub(accel_init)
850
851 contains
853#if defined(HAVE_OPENCL)
854 subroutine select_device(idevice)
855 integer, intent(inout) :: idevice
856 integer :: irank
857 character(len=256) :: device_name
858
859 push_sub(accel_init.select_device)
860
861 idevice = mod(base_grp%rank, ndevices)
862
863 call base_grp%barrier()
864 call messages_write('Info: CL device distribution:')
866 do irank = 0, base_grp%size - 1
867 if (irank == base_grp%rank) then
868 call clgetdeviceinfo(alldevices(idevice + 1), cl_device_name, device_name, cl_status)
869 call messages_write(' MPI node ')
870 call messages_write(base_grp%rank)
871 call messages_write(' -> CL device ')
872 call messages_write(idevice)
873 call messages_write(' : '//device_name)
874 call messages_info(all_nodes = .true.)
875 end if
876 call base_grp%barrier()
877 end do
878
879 pop_sub(accel_init.select_device)
880 end subroutine select_device
881#endif
882
883 subroutine device_info()
884#ifdef HAVE_OPENCL
885 integer(int64) :: val
886#endif
887#ifdef HAVE_CUDA
888 integer :: version
889#endif
890 integer :: major, minor
891 character(len=256) :: val_str
892
893 push_sub(accel_init.device_info)
894
895 call messages_new_line()
896 call messages_write('Selected device:')
897 call messages_new_line()
898
899#ifdef HAVE_OPENCL
900 call messages_write(' Framework : OpenCL')
901#endif
902#ifdef HAVE_CUDA
903#ifdef __HIP_PLATFORM_AMD__
904 call messages_write(' Framework : ROCm')
905#else
906 call messages_write(' Framework : CUDA')
907#endif
908#endif
909 call messages_info()
910
911#ifdef HAVE_CUDA
912 call messages_write(' Device type : GPU', new_line = .true.)
913#ifdef __HIP_PLATFORM_AMD__
914 call messages_write(' Device vendor : AMD Corporation', new_line = .true.)
915#else
916 call messages_write(' Device vendor : NVIDIA Corporation', new_line = .true.)
917#endif
918#endif
919
920#ifdef HAVE_OPENCL
921 call clgetdeviceinfo(accel%device%cl_device, cl_device_type, val, cl_status)
922 call messages_write(' Device type :')
923 select case (int(val, int32))
924 case (cl_device_type_gpu)
925 call messages_write(' GPU')
926 case (cl_device_type_cpu)
927 call messages_write(' CPU')
928 case (cl_device_type_accelerator)
929 call messages_write(' accelerator')
930 end select
931 call messages_new_line()
932
933 call clgetdeviceinfo(accel%device%cl_device, cl_device_vendor, val_str, cl_status)
934 call messages_write(' Device vendor : '//trim(val_str))
935 call messages_new_line()
936#endif
937
938#ifdef HAVE_OPENCL
939 call clgetdeviceinfo(accel%device%cl_device, cl_device_name, val_str, cl_status)
940#endif
941#ifdef HAVE_CUDA
942 call cuda_device_name(accel%device%cuda_device, val_str)
943#endif
944 call messages_write(' Device name : '//trim(val_str))
945 call messages_new_line()
946
947#ifdef HAVE_CUDA
948 call cuda_device_capability(accel%device%cuda_device, major, minor)
949#endif
950 call messages_write(' Cuda capabilities :')
951 call messages_write(major, fmt = '(i2)')
952 call messages_write('.')
953 call messages_write(minor, fmt = '(i1)')
954 call messages_new_line()
955
956 ! VERSION
957#ifdef HAVE_OPENCL
958 call clgetdeviceinfo(accel%device%cl_device, cl_driver_version, val_str, cl_status)
959 call messages_write(' Driver version : '//trim(val_str))
960#endif
961#ifdef HAVE_CUDA
962 call cuda_driver_version(version)
963 call messages_write(' Driver version : ')
964 call messages_write(version)
965#endif
966 call messages_new_line()
967
968
969#ifdef HAVE_OPENCL
970 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_compute_units, val, cl_status)
971 call messages_write(' Compute units :')
972 call messages_write(val)
973 call messages_new_line()
974
975 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_clock_frequency, val, cl_status)
976 call messages_write(' Clock frequency :')
977 call messages_write(val)
978 call messages_write(' GHz')
979 call messages_new_line()
980#endif
981
982 call messages_write(' Device memory :')
983 call messages_write(accel%global_memory_size, units=unit_megabytes)
985
986 call messages_write(' Local/shared memory :')
987 call messages_write(accel%local_memory_size, units=unit_kilobytes)
988 call messages_new_line()
989
990
991#ifdef HAVE_OPENCL
992 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_mem_alloc_size, val, cl_status)
993 call messages_write(' Max alloc size :')
994 call messages_write(val, units = unit_megabytes)
995 call messages_new_line()
996
997 call clgetdeviceinfo(accel%device%cl_device, cl_device_global_mem_cache_size, val, cl_status)
998 call messages_write(' Device cache :')
999 call messages_write(val, units = unit_kilobytes)
1000 call messages_new_line()
1001
1002 call clgetdeviceinfo(accel%device%cl_device, cl_device_max_constant_buffer_size, val, cl_status)
1003 call messages_write(' Constant memory :')
1005 call messages_new_line()
1006#endif
1007
1008 call messages_write(' Max. group/block size :')
1009 call messages_write(accel%max_workgroup_size)
1010 call messages_new_line()
1011
1012
1013#ifdef HAVE_OPENCL
1014 call messages_write(' Extension cl_khr_fp64 :')
1015 call messages_write(f90_cl_device_has_extension(accel%device%cl_device, "cl_khr_fp64"))
1016 call messages_new_line()
1017
1018 call messages_write(' Extension cl_amd_fp64 :')
1019 call messages_write(f90_cl_device_has_extension(accel%device%cl_device, "cl_amd_fp64"))
1020 call messages_new_line()
1022 call messages_write(' Extension cl_khr_int64_base_atomics :')
1023 call messages_write(f90_cl_device_has_extension(accel%device%cl_device, "cl_khr_int64_base_atomics"))
1024 call messages_new_line()
1025
1026#endif
1027
1028 call messages_info()
1029
1030
1031 pop_sub(accel_init.device_info)
1032 end subroutine device_info
1033
1034 end subroutine accel_init
1035
1036 ! ------------------------------------------
1037#ifdef HAVE_OPENCL
1038 integer function get_platform_id(platform_name) result(platform_id)
1039 character(len=*), intent(in) :: platform_name
1040
1041 platform_id = cl_plat_invalid
1042 if (index(platform_name, 'AMD') > 0) platform_id = cl_plat_amd
1043 if (index(platform_name, 'ATI') > 0) platform_id = cl_plat_ati
1044 if (index(platform_name, 'NVIDIA') > 0) platform_id = cl_plat_nvidia
1045 if (index(platform_name, 'Intel') > 0) platform_id = cl_plat_intel
1046 end function get_platform_id
1047#endif
1048 ! ------------------------------------------
1049
  !> Shut down the acceleration layer: release the module-level scratch
  !! buffers, drain the device-memory allocation cache, report cache
  !! statistics, tear down the vendor libraries and device context, and
  !! abort if any device buffers were leaked.
1050 subroutine accel_end(namespace)
1051 type(namespace_t), intent(in) :: namespace
1052
1053#ifdef HAVE_OPENCL
1054 integer :: ierr
1055#endif
1056 integer(int64) :: hits, misses
1057 real(real64) :: volume_hits, volume_misses
1058 logical :: found
1059 type(accel_mem_t) :: tmp
1060
1061 push_sub(accel_end)
1062
1063 if (accel_is_enabled()) then
1064
1065 ! Release global buffers
1066 call accel_release_buffer(zm_0_buffer)
1067 call accel_release_buffer(zm_1_buffer)
1068 call accel_release_buffer(dm_0_buffer)
1069 call accel_release_buffer(dm_1_buffer)
1070
    ! Drain the allocation cache: pop entries (any size) until empty and
    ! free each raw device allocation for real.
1071 do
1072 call alloc_cache_get(memcache, alloc_cache_any_size, found, tmp%mem)
1073 if (.not. found) exit
1074
1075#ifdef HAVE_OPENCL
1076 call clreleasememobject(tmp%mem, ierr)
1077 if (ierr /= cl_success) call opencl_print_error(ierr, "clReleaseMemObject")
1078#endif
1079#ifdef HAVE_CUDA
1080 call cuda_mem_free(tmp%mem)
1081#endif
1082 end do
1083
1084 call alloc_cache_end(memcache, hits, misses, volume_hits, volume_misses)
1085
    ! Print the cache statistics; both ratios are guarded against a
    ! division by zero when nothing was ever allocated.
1086 call messages_print_with_emphasis(msg="Acceleration-device allocation cache", namespace=namespace)
1088 call messages_new_line()
1089 call messages_write(' Number of allocations =')
1090 call messages_write(hits + misses, new_line = .true.)
1091 call messages_write(' Volume of allocations =')
1092 call messages_write(volume_hits + volume_misses, fmt = 'f18.1', units = unit_gigabytes, align_left = .true., &
1093 new_line = .true.)
1094 call messages_write(' Hit ratio =')
1095 if (hits + misses > 0) then
1096 call messages_write(hits/real(hits + misses, real64)*100, fmt='(f6.1)', align_left = .true.)
1097 else
1098 call messages_write(m_zero, fmt='(f6.1)', align_left = .true.)
1099 end if
1100 call messages_write('%', new_line = .true.)
1101 call messages_write(' Volume hit ratio =')
1102 if (volume_hits + volume_misses > 0) then
1103 call messages_write(volume_hits/(volume_hits + volume_misses)*100, fmt='(f6.1)', align_left = .true.)
1104 else
1105 call messages_write(m_zero, fmt='(f6.1)', align_left = .true.)
1106 end if
1107 call messages_write('%')
1108 call messages_new_line()
1109 call messages_info()
1110
1111 call messages_print_with_emphasis(namespace=namespace)
1112 end if
1113
1115
    ! Tear down the optional OpenCL math libraries (no-ops unless compiled in).
1116#ifdef HAVE_CLBLAS
1117 call clblasteardown()
1118#endif
1119
1120#ifdef HAVE_CLFFT
1121 call clfftteardown()
1122#endif
1123
    ! Release the device context / command queue themselves.
1124 if (accel_is_enabled()) then
1125#ifdef HAVE_CUDA
1126 call cublas_end(accel%cublas_handle)
1127 if (.not. accel%cuda_mpi) then ! CUDA aware MPI finalize will do the cleanup
1128 call cuda_end(accel%context%cuda_context, accel%device%cuda_device)
1129 end if
1130#endif
1131
1132#ifdef HAVE_OPENCL
1133 call clreleasecommandqueue(accel%command_queue, ierr)
1134
1135 if (ierr /= cl_success) call opencl_print_error(ierr, "ReleaseCommandQueue")
1136 call clreleasecontext(accel%context%cl_context, cl_status)
1137#endif
1138
    ! Leak check: the counters are maintained by accel_create_buffer_8 and
    ! accel_release_buffer; a nonzero count means some buffer was never freed.
1139 if (buffer_alloc_count /= 0) then
1140 call messages_write('Accel:')
1141 call messages_write(real(allocated_mem, real64) , fmt = 'f12.1', units = unit_megabytes, align_left = .true.)
1142 call messages_write(' in ')
1143 call messages_write(buffer_alloc_count)
1144 call messages_write(' buffers were not deallocated.')
1145 call messages_fatal()
1146 end if
1147
1148 end if
1149
1150 pop_sub(accel_end)
1151 end subroutine accel_end
1152
1153 ! ------------------------------------------
1155 integer(int64) function accel_padded_size_i8(nn) result(psize)
1156 integer(int64), intent(in) :: nn
1157
1158 integer(int64) :: modnn, bsize
1159
1160 psize = nn
1161
1162 if (accel_is_enabled()) then
1163
1164 bsize = accel_max_workgroup_size()
1165
1166 psize = nn
1167 modnn = mod(nn, bsize)
1168 if (modnn /= 0) psize = psize + bsize - modnn
1169
1170 end if
1171
1172 end function accel_padded_size_i8
1173
1174 ! ------------------------------------------
1175
1176 integer(int32) function accel_padded_size_i4(nn) result(psize)
1177 integer(int32), intent(in) :: nn
1178
1179 psize = int(accel_padded_size_i8(int(nn, int64)), int32)
1180
1181 end function accel_padded_size_i4
1182
1183 ! ------------------------------------------
1184
1185 subroutine accel_create_buffer_4(this, flags, type, size, set_zero, async)
1186 type(accel_mem_t), intent(inout) :: this
1187 integer, intent(in) :: flags
1188 type(type_t), intent(in) :: type
1189 integer, intent(in) :: size
1190 logical, optional, intent(in) :: set_zero
1191 logical, optional, intent(in) :: async
1192
1193 call accel_create_buffer_8(this, flags, type, int(size, int64), set_zero, async)
1194 end subroutine accel_create_buffer_4
1195
1196 ! ------------------------------------------
1197
  !> Allocate a device buffer holding `size` elements of `type`, reusing a
  !! cached allocation of the same byte size when one is available.
  !! Depending on set_zero (or the InitializeGPUBuffers option) the buffer
  !! is filled with 0x00 bytes (zeros) or 0xFF bytes (a NaN-like bit
  !! pattern for floating-point data). `async` selects the asynchronous
  !! CUDA allocation path.
1198 subroutine accel_create_buffer_8(this, flags, type, size, set_zero, async)
1199 type(accel_mem_t), intent(inout) :: this
1200 integer, intent(in) :: flags
1201 type(type_t), intent(in) :: type
1202 integer(int64), intent(in) :: size
1203 logical, optional, intent(in) :: set_zero
1204 logical, optional, intent(in) :: async
1205
1206 integer(int64) :: fsize
1207 logical :: found
1208 integer(int64) :: initialize_buffers
1209#ifdef HAVE_OPENCL
1210 integer :: ierr
1211#endif
1212
1213 push_sub(accel_create_buffer_8)
1214
    ! Record the descriptor first; fsize is the total size in bytes.
1215 this%type = type
1216 this%size = size
1217 this%flags = flags
1218 fsize = int(size, int64)*types_get_size(type)
1219 this%allocated = .true.
1220
1221 if (fsize > 0) then
1222
    ! Try to reuse a cached device allocation of exactly fsize bytes;
    ! only on a cache miss is a fresh device allocation made.
1223 call alloc_cache_get(memcache, fsize, found, this%mem)
1224
1225 if (.not. found) then
1226#ifdef HAVE_OPENCL
1227 this%mem = clcreatebuffer(accel%context%cl_context, flags, fsize, ierr)
1228 if (ierr /= cl_success) call opencl_print_error(ierr, "clCreateBuffer")
1229#endif
1230#ifdef HAVE_CUDA
1231 if(optional_default(async, .false.)) then
1232 call cuda_mem_alloc_async(this%mem, fsize)
1233 else
1234 call cuda_mem_alloc(this%mem, fsize)
1235 end if
1236#endif
1237 end if
1238
    ! Global accounting; accel_end uses these to detect leaked buffers.
1239 buffer_alloc_count = buffer_alloc_count + 1
1240 allocated_mem = allocated_mem + fsize
1241
1242 end if
1243
    ! An explicit set_zero overrides the global InitializeGPUBuffers choice.
1244 if (present(set_zero)) then
1245 initialize_buffers = merge(option__initializegpubuffers__yes, option__initializegpubuffers__no, set_zero)
1246 else
1247 initialize_buffers = accel%initialize_buffers
1248 end if
1249 select case (initialize_buffers)
1250 case (option__initializegpubuffers__yes)
1251 call accel_set_buffer_to(this, type, int(z'00', int8), size)
1252 case (option__initializegpubuffers__nan)
1253 call accel_set_buffer_to(this, type, int(z'FF', int8), size)
1254 end select
1255
1256 pop_sub(accel_create_buffer_8)
1257 end subroutine accel_create_buffer_8
1258
1259 ! ------------------------------------------
1260
1261 subroutine accel_release_buffer(this, async)
1262 type(accel_mem_t), intent(inout) :: this
1263 logical, optional, intent(in) :: async
1264
1265#ifdef HAVE_OPENCL
1266 integer :: ierr
1267#endif
1268 logical :: put
1269 integer(int64) :: fsize
1270
1271 push_sub(accel_release_buffer)
1272
1273 if (this%size > 0) then
1274
1275 fsize = int(this%size, int64)*types_get_size(this%type)
1276
1277 call alloc_cache_put(memcache, fsize, this%mem, put)
1278
1279 if (.not. put) then
1280#ifdef HAVE_OPENCL
1281 call clreleasememobject(this%mem, ierr)
1282 if (ierr /= cl_success) call opencl_print_error(ierr, "clReleaseMemObject")
1283#endif
1284#ifdef HAVE_CUDA
1285 if (optional_default(async, .false.)) then
1286 call cuda_mem_free_async(this%mem)
1287 else
1288 call cuda_mem_free(this%mem)
1289 end if
1290#endif
1291 end if
1292
1293 buffer_alloc_count = buffer_alloc_count - 1
1294 allocated_mem = allocated_mem + fsize
1295
1296 end if
1297
1298 this%size = 0
1299 this%flags = 0
1300
1301 this%allocated = .false.
1302
1303 pop_sub(accel_release_buffer)
1304 end subroutine accel_release_buffer
1305
1306 ! ------------------------------------------------------
1307
1308 ! Check if the temporary buffers are the right size, if not reallocate them
  !> Grow-only reallocation helper: if `buffer` holds fewer than
  !! `required_size` elements, release it and allocate a new one.
  !! NOTE: the previous contents are NOT preserved across a reallocation;
  !! a buffer that is already large enough is left untouched.
1309 subroutine accel_ensure_buffer_size(buffer, flags, type, required_size, set_zero, async)
1310 type(accel_mem_t), intent(inout) :: buffer
1311 integer, intent(in) :: flags
1312 type(type_t), intent(in) :: type
1313 integer, intent(in) :: required_size
1314 logical, intent(in) :: set_zero
1315 logical, optional, intent(in) :: async
1316
1317 push_sub(accel_ensure_buffer_size)
1318
1319 if (buffer%size < required_size) then
1320 call accel_release_buffer(buffer, async=optional_default(async, .false.))
1321 call accel_create_buffer(buffer, flags, type, required_size, set_zero=set_zero, async=optional_default(async, .false.))
1322 end if
1323
1325 end subroutine accel_ensure_buffer_size
1326
1327 ! ------------------------------------------
1328
1329 logical pure function accel_buffer_is_allocated(this) result(allocated)
1330 type(accel_mem_t), intent(in) :: this
1331
1332 allocated = this%allocated
1333 end function accel_buffer_is_allocated
1334
1335 ! -----------------------------------------
1336
  !> Block until all work queued on the accelerator has completed.
  !! No-op when acceleration is disabled.
1337 subroutine accel_finish()
1338#ifdef HAVE_OPENCL
1339 integer :: ierr
1340#endif
1341
1342 ! no push_sub, called too frequently
1343
1344 if (accel_is_enabled()) then
1345#ifdef HAVE_OPENCL
1346 call clfinish(accel%command_queue, ierr)
1347 if (ierr /= cl_success) call opencl_print_error(ierr, 'clFinish')
1348#endif
1349#ifdef HAVE_CUDA
    ! NOTE(review): the CUDA synchronization statement is not visible in
    ! this excerpt — presumably a context/device synchronize; confirm
    ! against the repository.
1351#endif
1352 end if
1353 end subroutine accel_finish
1354
1355 ! ------------------------------------------
1356
  !> Bind a device buffer as kernel argument number `narg`.
  !! The buffer must already be allocated (asserted below).
  !! narg follows the underlying API's argument-index convention
  !! (0-based for OpenCL's clSetKernelArg; assumed the same for the CUDA
  !! wrapper — confirm against cuda_oct_m).
1357 subroutine accel_set_kernel_arg_buffer(kernel, narg, buffer)
1358 type(accel_kernel_t), intent(inout) :: kernel
1359 integer, intent(in) :: narg
1360 type(accel_mem_t), intent(in) :: buffer
1361
1362#ifdef HAVE_OPENCL
1363 integer :: ierr
1364#endif
1365
1366 assert(accel_buffer_is_allocated(buffer))
1367
1368 ! no push_sub, called too frequently
1369#ifdef HAVE_OPENCL
1370 call clsetkernelarg(kernel%kernel, narg, buffer%mem, ierr)
1371 if (ierr /= cl_success) call opencl_print_error(ierr, "clSetKernelArg_buf")
1372#endif
1373
    ! CUDA path: the argument is recorded in the kernel's argument list and
    ! consumed at launch time by accel_kernel_run_8.
1374#ifdef HAVE_CUDA
1375 call cuda_kernel_set_arg_buffer(kernel%arguments, buffer%mem, narg)
1376#endif
1377
1378 end subroutine accel_set_kernel_arg_buffer
1379
1380 ! ------------------------------------------
1381
  !> Reserve `size` elements of `type` in local (OpenCL) / shared (CUDA)
  !! memory for kernel argument `narg`. Aborts if the request exceeds the
  !! device's local memory or is not positive.
1382 subroutine accel_set_kernel_arg_local(kernel, narg, type, size)
1383 type(accel_kernel_t), intent(inout) :: kernel
1384 integer, intent(in) :: narg
1385 type(type_t), intent(in) :: type
1386 integer, intent(in) :: size
1387
1388#ifdef HAVE_OPENCL
1389 integer :: ierr
1390#endif
1391 integer(int64) :: size_in_bytes
1392
1395
1396 size_in_bytes = int(size, int64)*types_get_size(type)
1397
    ! Validate the request against the device limit before touching the kernel.
1398 if (size_in_bytes > accel%local_memory_size) then
1399 write(message(1), '(a,f12.6,a)') "CL Error: requested local memory: ", real(size_in_bytes, real64) /1024.0, " Kb"
1400 write(message(2), '(a,f12.6,a)') " available local memory: ", real(accel%local_memory_size, real64) /1024.0, " Kb"
1401 call messages_fatal(2)
1402 else if (size_in_bytes <= 0) then
1403 write(message(1), '(a,i10)') "CL Error: invalid local memory size: ", size_in_bytes
1404 call messages_fatal(1)
1405 end if
1406
    ! CUDA has no per-argument local memory: the byte count is stored on the
    ! kernel and passed as dynamic shared memory at launch (accel_kernel_run_8
    ! resets it to zero afterwards).
1407#ifdef HAVE_CUDA
1408 kernel%cuda_shared_mem = size_in_bytes
1409#endif
1411#ifdef HAVE_OPENCL
1412 call clsetkernelarglocal(kernel%kernel, narg, size_in_bytes, ierr)
1413 if (ierr /= cl_success) call opencl_print_error(ierr, "set_kernel_arg_local")
1414#endif
1415
1417 end subroutine accel_set_kernel_arg_local
1418
1419 ! ------------------------------------------
1420
  !> Launch a kernel with the given global and local work sizes (up to 3
  !! dimensions, int64). Global sizes must be positive multiples of the
  !! local sizes (asserted). On CUDA the global sizes are converted to a
  !! grid of thread blocks and all device limits are checked explicitly.
1421 subroutine accel_kernel_run_8(kernel, globalsizes, localsizes)
1422 type(accel_kernel_t), intent(inout) :: kernel
1423 integer(int64), intent(in) :: globalsizes(:)
1424 integer(int64), intent(in) :: localsizes(:)
1425
1426 integer :: dim
1427#ifdef HAVE_OPENCL
1428 integer :: ierr
1429#endif
1430 integer(int64) :: gsizes(1:3)
1431 integer(int64) :: lsizes(1:3)
1432
1433 ! no push_sub, called too frequently
1434
1435 ! cuda needs all dimensions
1436 gsizes = 1
1437 lsizes = 1
1438
1439 dim = ubound(globalsizes, dim=1)
1440
1441 assert(dim == ubound(localsizes, dim=1))
1442
1443 ! if one size is zero, there is nothing to do
1444 if (any(globalsizes == 0)) return
1445
    ! Preconditions: positive local sizes within the device limit, and
    ! global sizes divisible by the corresponding local sizes.
1446 assert(all(localsizes > 0))
1447 assert(all(localsizes <= accel_max_workgroup_size()))
1448 assert(all(mod(globalsizes, localsizes) == 0))
1449
1450 gsizes(1:dim) = globalsizes(1:dim)
1451 lsizes(1:dim) = localsizes(1:dim)
1453#ifdef HAVE_OPENCL
1454 call clenqueuendrangekernel(accel%command_queue, kernel%kernel, gsizes(1:dim), lsizes(1:dim), ierr)
1455 if (ierr /= cl_success) call opencl_print_error(ierr, "EnqueueNDRangeKernel")
1456#endif
1457
1458#ifdef HAVE_CUDA
1459 ! Maximum dimension of a block
1460 if (any(lsizes(1:3) > accel%max_block_dim(1:3))) then
1461 message(1) = "Maximum dimension of a block too large in kernel "//trim(kernel%kernel_name)
1462 message(2) = "The following conditions should be fulfilled:"
1463 write(message(3), "(A, I8, A, I8)") "Dim 1: ", lsizes(1), " <= ", accel%max_block_dim(1)
1464 write(message(4), "(A, I8, A, I8)") "Dim 2: ", lsizes(2), " <= ", accel%max_block_dim(2)
1465 write(message(5), "(A, I8, A, I8)") "Dim 3: ", lsizes(3), " <= ", accel%max_block_dim(3)
1466 message(6) = "This is an internal error, please contact the developers."
1467 call messages_fatal(6)
1468 end if
1469
1470
1471 ! Maximum number of threads per block
1472 if (product(lsizes) > accel_max_workgroup_size()) then
1473 message(1) = "Maximum number of threads per block too large in kernel "//trim(kernel%kernel_name)
1474 message(2) = "The following condition should be fulfilled:"
1475 write(message(3), "(I8, A, I8)") product(lsizes), " <= ", accel_max_workgroup_size()
1476 message(4) = "This is an internal error, please contact the developers."
1477 call messages_fatal(4)
1478 end if
    ! Convert total work items to a grid of blocks (exact division is
    ! guaranteed by the mod() assertion above).
1480 gsizes(1:3) = gsizes(1:3)/lsizes(1:3)
1481
1482 ! Maximum dimensions of the grid of thread block
1483 if (any(gsizes(1:3) > accel%max_grid_dim(1:3))) then
1484 message(1) = "Maximum dimension of grid too large in kernel "//trim(kernel%kernel_name)
1485 message(2) = "The following conditions should be fulfilled:"
1486 write(message(3), "(A, I8, A, I10)") "Dim 1: ", gsizes(1), " <= ", accel%max_grid_dim(1)
1487 write(message(4), "(A, I8, A, I10)") "Dim 2: ", gsizes(2), " <= ", accel%max_grid_dim(2)
1488 write(message(5), "(A, I8, A, I10)") "Dim 3: ", gsizes(3), " <= ", accel%max_grid_dim(3)
1489 message(6) = "This is an internal error, please contact the developers."
1490 call messages_fatal(6)
1491 end if
1492
    ! Launch, passing the dynamic shared-memory request set by
    ! accel_set_kernel_arg_local, then reset it for the next launch.
1493 call cuda_launch_kernel(kernel%cuda_kernel, gsizes(1), lsizes(1), kernel%cuda_shared_mem, kernel%arguments)
1495 kernel%cuda_shared_mem = 0
1496#endif
1497
1498 end subroutine accel_kernel_run_8
1499
1500 ! -----------------------------------------------
1501
1502 subroutine accel_kernel_run_4(kernel, globalsizes, localsizes)
1503 type(accel_kernel_t), intent(inout) :: kernel
1504 integer, intent(in) :: globalsizes(:)
1505 integer, intent(in) :: localsizes(:)
1506
1507 call accel_kernel_run_8(kernel, int(globalsizes, int64), int(localsizes, int64))
1508
1509 end subroutine accel_kernel_run_4
1510
1511 ! -----------------------------------------------
1512
1513 integer pure function accel_max_workgroup_size() result(max_workgroup_size)
1514 max_workgroup_size = accel%max_workgroup_size
1515 end function accel_max_workgroup_size
1516
1517 ! -----------------------------------------------
1518
1519 integer function accel_kernel_workgroup_size(kernel) result(workgroup_size)
1520 type(accel_kernel_t), intent(inout) :: kernel
1521
1522#ifdef HAVE_OPENCL
1523 integer(int64) :: workgroup_size8
1524 integer :: ierr
1525#endif
1526#ifdef HAVE_CUDA
1527 integer :: max_workgroup_size
1528#endif
1529
1530 workgroup_size = 0
1531
1532#ifdef HAVE_OPENCL
1533 call clgetkernelworkgroupinfo(kernel%kernel, accel%device%cl_device, cl_kernel_work_group_size, workgroup_size8, ierr)
1534 if (ierr /= cl_success) call opencl_print_error(ierr, "EnqueueNDRangeKernel")
1535 workgroup_size = workgroup_size8
1536#endif
1537
1538#ifdef HAVE_CUDA
1539 call cuda_kernel_max_threads_per_block(kernel%cuda_kernel, max_workgroup_size)
1540 if (debug%info .and. max_workgroup_size /= accel%max_workgroup_size) then
1541 write(message(1), "(A, I5, A)") "A kernel can use only less threads per block (", workgroup_size, ")", &
1542 "than available on the device (", accel%max_workgroup_size, ")"
1543 call messages_info(1)
1544 end if
1545 ! recommended number of threads per block is 256 according to the CUDA best practice guide
1546 ! see https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#thread-and-block-heuristics
1547 workgroup_size = 256
1548 ! make sure we do not use more threads per block than available for this kernel
1549 workgroup_size = min(workgroup_size, max_workgroup_size)
1550#endif
1551
1552 end function accel_kernel_workgroup_size
1553
1554 ! -----------------------------------------------
1555
1556#ifdef HAVE_OPENCL
  !> Create and build an OpenCL program whose source is a single #include
  !! of `filename` (resolved against the installed share/opencl directory).
  !! Builds an aggressive-optimization flag string, requires fp64 support
  !! (via the khr or amd extension), and on a build failure prints the
  !! compiler log to stderr before aborting.
  !! @param flags  Extra compiler flags appended to the defaults.
1557 subroutine opencl_build_program(prog, filename, flags)
1558 type(cl_program), intent(inout) :: prog
1559 character(len=*), intent(in) :: filename
1560 character(len=*), optional, intent(in) :: flags
1561
1562 character(len = 1000) :: string
1563 character(len = 256) :: share_string
1564 integer :: ierr, ierrlog, iunit, irec, newlen
1565
1566 push_sub(opencl_build_program)
1567
    ! The program source is just an include; the real code is found via the
    ! -I path added below.
1568 string = '#include "'//trim(filename)//'"'
1569
1570 call messages_write("Building CL program '"//trim(filename)//"'.")
1571 call messages_info(debug_only=.true.)
1572
1573 prog = clcreateprogramwithsource(accel%context%cl_context, trim(string), ierr)
1574 if (ierr /= cl_success) call opencl_print_error(ierr, "clCreateProgramWithSource")
1575
1576 ! build the compilation flags
    ! (note: `string` is reused here — the source text above has already
    ! been handed to clCreateProgramWithSource)
1577 string='-w'
1578 ! full optimization
1579 string=trim(string)//' -cl-denorms-are-zero'
1580 ! The following flag gives an error with the Xeon Phi
1581 ! string=trim(string)//' -cl-strict-aliasing'
1582 string=trim(string)//' -cl-mad-enable'
1583 string=trim(string)//' -cl-unsafe-math-optimizations'
1584 string=trim(string)//' -cl-finite-math-only'
1585 string=trim(string)//' -cl-fast-relaxed-math'
1586
1587 share_string='-I'//trim(conf%share)//'/opencl/'
1588
    ! Double precision is mandatory: prefer the khr extension, fall back to
    ! the amd one, otherwise abort.
1589 if (f90_cl_device_has_extension(accel%device%cl_device, "cl_khr_fp64")) then
1590 string = trim(string)//' -DEXT_KHR_FP64'
1591 else if (f90_cl_device_has_extension(accel%device%cl_device, "cl_amd_fp64")) then
1592 string = trim(string)//' -DEXT_AMD_FP64'
1593 else
1594 call messages_write('Octopus requires an OpenCL device with double-precision support.')
1595 call messages_fatal()
1596 end if
1597
1598 if (accel_use_shared_mem()) then
1599 string = trim(string)//' -DSHARED_MEM'
1600 end if
1601
1602 if (present(flags)) then
1603 string = trim(string)//' '//trim(flags)
1604 end if
1606 call messages_write("Debug info: compilation flags '"//trim(string), new_line = .true.)
1607 call messages_write(' '//trim(share_string)//"'.")
1608 call messages_info(debug_only=.true.)
1609
1610 string = trim(string)//' '//trim(share_string)
1611
1612 call clbuildprogram(prog, trim(string), ierr)
1613
    ! On failure, fetch and print the build log before aborting through
    ! opencl_print_error (which never returns).
1614 if(ierr /= cl_success) then
1615 call clgetprogrambuildinfo(prog, accel%device%cl_device, cl_program_build_log, string, ierrlog)
1616 if (ierrlog /= cl_success) call opencl_print_error(ierrlog, "clGetProgramBuildInfo")
1617
1618 ! CL_PROGRAM_BUILD_LOG seems to have a useless '\n' in it
1619 newlen = scan(string, achar(010), back = .true.) - 1
1620 if (newlen >= 0) string = string(1:newlen)
1621
1622 if (len(trim(string)) > 0) write(stderr, '(a)') trim(string)
1623
1624 call opencl_print_error(ierr, "clBuildProgram")
1625 end if
1626
1627 pop_sub(opencl_build_program)
1628 end subroutine opencl_build_program
1629#endif
1630
1631 ! -----------------------------------------------
1632#ifdef HAVE_OPENCL
1633 subroutine opencl_release_program(prog)
1634 type(cl_program), intent(inout) :: prog
1635
1636 integer :: ierr
1637
1638 push_sub(opencl_release_program)
1639
1640 call clreleaseprogram(prog, ierr)
1641 if (ierr /= cl_success) call opencl_print_error(ierr, "clReleaseProgram")
1642
1643 pop_sub(opencl_release_program)
1644 end subroutine opencl_release_program
1645#endif
1646
1647 ! -----------------------------------------------
1648
1649#ifdef HAVE_OPENCL
1650 subroutine opencl_release_kernel(prog)
1651 type(cl_kernel), intent(inout) :: prog
1652
1653 integer :: ierr
1654
1655 push_sub(opencl_release_kernel)
1656
1657#ifdef HAVE_OPENCL
1658 call clreleasekernel(prog, ierr)
1659 if (ierr /= cl_success) call opencl_print_error(ierr, "clReleaseKernel")
1660#endif
1661
1662 pop_sub(opencl_release_kernel)
1663 end subroutine opencl_release_kernel
1664#endif
1665
1666#ifdef HAVE_OPENCL
1667 ! -----------------------------------------------
1668 subroutine opencl_create_kernel(kernel, prog, name)
1669 type(cl_kernel), intent(inout) :: kernel
1670 type(cl_program), intent(inout) :: prog
1671 character(len=*), intent(in) :: name
1672
1673 integer :: ierr
1674
1675 push_sub(opencl_create_kernel)
1676 call profiling_in("CL_BUILD_KERNEL", exclude = .true.)
1677
1678#ifdef HAVE_OPENCL
1679 kernel = clcreatekernel(prog, name, ierr)
1680 if (ierr /= cl_success) call opencl_print_error(ierr, "clCreateKernel")
1681#endif
1682
1683 call profiling_out("CL_BUILD_KERNEL")
1684 pop_sub(opencl_create_kernel)
1685 end subroutine opencl_create_kernel
1686#endif
1687
1688 ! ------------------------------------------------
1689#ifdef HAVE_OPENCL
  !> Translate an OpenCL status code to its symbolic name and abort with a
  !! fatal message "OpenCL <name> <code>". This routine never returns
  !! (messages_fatal terminates the run); unknown codes are reported numerically.
  !! @param name  Label of the failing call, included in the message.
1690 subroutine opencl_print_error(ierr, name)
1691 integer, intent(in) :: ierr
1692 character(len=*), intent(in) :: name
1693
1694 character(len=40) :: errcode
1695
1696 push_sub(opencl_print_error)
1697
1698 select case (ierr)
1699 case (cl_success); errcode = 'CL_SUCCESS '
1700 case (cl_device_not_found); errcode = 'CL_DEVICE_NOT_FOUND '
1701 case (cl_device_not_available); errcode = 'CL_DEVICE_NOT_AVAILABLE '
1702 case (cl_compiler_not_available); errcode = 'CL_COMPILER_NOT_AVAILABLE '
1703 case (cl_mem_object_allocation_failure); errcode = 'CL_MEM_OBJECT_ALLOCATION_FAILURE '
1704 case (cl_out_of_resources); errcode = 'CL_OUT_OF_RESOURCES '
1705 case (cl_out_of_host_memory); errcode = 'CL_OUT_OF_HOST_MEMORY '
1706 case (cl_profiling_info_not_available); errcode = 'CL_PROFILING_INFO_NOT_AVAILABLE '
1707 case (cl_mem_copy_overlap); errcode = 'CL_MEM_COPY_OVERLAP '
1708 case (cl_image_format_mismatch); errcode = 'CL_IMAGE_FORMAT_MISMATCH '
1709 case (cl_image_format_not_supported); errcode = 'CL_IMAGE_FORMAT_NOT_SUPPORTED '
1710 case (cl_build_program_failure); errcode = 'CL_BUILD_PROGRAM_FAILURE '
1711 case (cl_map_failure); errcode = 'CL_MAP_FAILURE '
1712 case (cl_invalid_value); errcode = 'CL_INVALID_VALUE '
1713 case (cl_invalid_device_type); errcode = 'CL_INVALID_DEVICE_TYPE '
1714 case (cl_invalid_platform); errcode = 'CL_INVALID_PLATFORM '
1715 case (cl_invalid_device); errcode = 'CL_INVALID_DEVICE '
1716 case (cl_invalid_context); errcode = 'CL_INVALID_CONTEXT '
1717 case (cl_invalid_queue_properties); errcode = 'CL_INVALID_QUEUE_PROPERTIES '
1718 case (cl_invalid_command_queue); errcode = 'CL_INVALID_COMMAND_QUEUE '
1719 case (cl_invalid_host_ptr); errcode = 'CL_INVALID_HOST_PTR '
1720 case (cl_invalid_mem_object); errcode = 'CL_INVALID_MEM_OBJECT '
1721 case (cl_invalid_image_format_descriptor); errcode = 'CL_INVALID_IMAGE_FORMAT_DESCRIPTOR '
1722 case (cl_invalid_image_size); errcode = 'CL_INVALID_IMAGE_SIZE '
1723 case (cl_invalid_sampler); errcode = 'CL_INVALID_SAMPLER '
1724 case (cl_invalid_binary); errcode = 'CL_INVALID_BINARY '
1725 case (cl_invalid_build_options); errcode = 'CL_INVALID_BUILD_OPTIONS '
1726 case (cl_invalid_program); errcode = 'CL_INVALID_PROGRAM '
1727 case (cl_invalid_program_executable); errcode = 'CL_INVALID_PROGRAM_EXECUTABLE '
1728 case (cl_invalid_kernel_name); errcode = 'CL_INVALID_KERNEL_NAME '
1729 case (cl_invalid_kernel_definition); errcode = 'CL_INVALID_KERNEL_DEFINITION '
1730 case (cl_invalid_kernel); errcode = 'CL_INVALID_KERNEL '
1731 case (cl_invalid_arg_index); errcode = 'CL_INVALID_ARG_INDEX '
1732 case (cl_invalid_arg_value); errcode = 'CL_INVALID_ARG_VALUE '
1733 case (cl_invalid_arg_size); errcode = 'CL_INVALID_ARG_SIZE '
1734 case (cl_invalid_kernel_args); errcode = 'CL_INVALID_KERNEL_ARGS '
1735 case (cl_invalid_work_dimension); errcode = 'CL_INVALID_WORK_DIMENSION '
1736 case (cl_invalid_work_group_size); errcode = 'CL_INVALID_WORK_GROUP_SIZE '
1737 case (cl_invalid_work_item_size); errcode = 'CL_INVALID_WORK_ITEM_SIZE '
1738 case (cl_invalid_global_offset); errcode = 'CL_INVALID_GLOBAL_OFFSET '
1739 case (cl_invalid_event_wait_list); errcode = 'CL_INVALID_EVENT_WAIT_LIST '
1740 case (cl_invalid_event); errcode = 'CL_INVALID_EVENT '
1741 case (cl_invalid_operation); errcode = 'CL_INVALID_OPERATION '
1742 case (cl_invalid_gl_object); errcode = 'CL_INVALID_GL_OBJECT '
1743 case (cl_invalid_buffer_size); errcode = 'CL_INVALID_BUFFER_SIZE '
1744 case (cl_invalid_mip_level); errcode = 'CL_INVALID_MIP_LEVEL '
1745 case (cl_invalid_global_work_size); errcode = 'CL_INVALID_GLOBAL_WORK_SIZE '
1746 case (cl_platform_not_found_khr); errcode = 'CL_PLATFORM_NOT_FOUND_KHR'
1747 case default
1748 write(errcode, '(i10)') ierr
1749 errcode = 'UNKNOWN ERROR CODE ('//trim(adjustl(errcode))//')'
1750 end select
1751
1752 message(1) = 'OpenCL '//trim(name)//' '//trim(errcode)
1753 call messages_fatal(1)
1754
    ! Unreachable in practice: messages_fatal terminates execution.
1755 pop_sub(opencl_print_error)
1756 end subroutine opencl_print_error
1757#endif
1758
1759 ! ----------------------------------------------------
1760
  !> Translate a clBLAS/CLBlast status code to its symbolic name and abort
  !! with a fatal message; never returns.
  !! NOTE(review): when neither HAVE_CLBLAS nor HAVE_CLBLAST is defined the
  !! whole select block is compiled out and `errcode` is used uninitialized
  !! in the message below — presumably this routine is only reachable with
  !! one of those libraries enabled; confirm against the callers.
1762 subroutine clblas_print_error(ierr, name)
1763 integer, intent(in) :: ierr
1764 character(len=*), intent(in) :: name
1765
1766 character(len=40) :: errcode
1767
1768 push_sub(clblas_print_error)
1769#if defined(HAVE_CLBLAS) || defined(HAVE_CLBLAST)
1770 select case (ierr)
1771 case (clblassuccess); errcode = 'clblasSuccess'
1772 case (clblasinvalidvalue); errcode = 'clblasInvalidValue'
1773 case (clblasinvalidcommandqueue); errcode = 'clblasInvalidCommandQueue'
1774 case (clblasinvalidcontext); errcode = 'clblasInvalidContext'
1775 case (clblasinvalidmemobject); errcode = 'clblasInvalidMemObject'
1776 case (clblasinvaliddevice); errcode = 'clblasInvalidDevice'
1777 case (clblasinvalideventwaitlist); errcode = 'clblasInvalidEventWaitList'
1778 case (clblasoutofresources); errcode = 'clblasOutOfResources'
1779 case (clblasoutofhostmemory); errcode = 'clblasOutOfHostMemory'
1780 case (clblasinvalidoperation); errcode = 'clblasInvalidOperation'
1781 case (clblascompilernotavailable); errcode = 'clblasCompilerNotAvailable'
1782 case (clblasbuildprogramfailure); errcode = 'clblasBuildProgramFailure'
1783 case (clblasnotimplemented); errcode = 'clblasNotImplemented'
1784 case (clblasnotinitialized); errcode = 'clblasNotInitialized'
1785 case (clblasinvalidmata); errcode = 'clblasInvalidMatA'
1786 case (clblasinvalidmatb); errcode = 'clblasInvalidMatB'
1787 case (clblasinvalidmatc); errcode = 'clblasInvalidMatC'
1788 case (clblasinvalidvecx); errcode = 'clblasInvalidVecX'
1789 case (clblasinvalidvecy); errcode = 'clblasInvalidVecY'
1790 case (clblasinvaliddim); errcode = 'clblasInvalidDim'
1791 case (clblasinvalidleaddima); errcode = 'clblasInvalidLeadDimA'
1792 case (clblasinvalidleaddimb); errcode = 'clblasInvalidLeadDimB'
1793 case (clblasinvalidleaddimc); errcode = 'clblasInvalidLeadDimC'
1794 case (clblasinvalidincx); errcode = 'clblasInvalidIncX'
1795 case (clblasinvalidincy); errcode = 'clblasInvalidIncY'
1796 case (clblasinsufficientmemmata); errcode = 'clblasInsufficientMemMatA'
1797 case (clblasinsufficientmemmatb); errcode = 'clblasInsufficientMemMatB'
1798 case (clblasinsufficientmemmatc); errcode = 'clblasInsufficientMemMatC'
1799 case (clblasinsufficientmemvecx); errcode = 'clblasInsufficientMemVecX'
1800 case (clblasinsufficientmemvecy); errcode = 'clblasInsufficientMemVecY'
    ! CLBlast defines additional codes beyond the clBLAS-compatible set.
1800#ifdef HAVE_CLBLAST
1801 case (clblastinsufficientmemorytemp); errcode = 'clblastInsufficientMemoryTemp'
1802 case (clblastinvalidbatchcount); errcode = 'clblastInvalidBatchCount'
1803 case (clblastinvalidoverridekernel); errcode = 'clblastInvalidOverrideKernel'
1804 case (clblastmissingoverrideparameter); errcode = 'clblastMissingOverrideParameter'
1805 case (clblastinvalidlocalmemusage); errcode = 'clblastInvalidLocalMemUsage'
1806 case (clblastnohalfprecision); errcode = 'clblastNoHalfPrecision'
1807 case (clblastnodoubleprecision); errcode = 'clblastNoDoublePrecision'
1808 case (clblastinvalidvectorscalar); errcode = 'clblastInvalidVectorScalar'
1809 case (clblastinsufficientmemoryscalar); errcode = 'clblastInsufficientMemoryScalar'
1810 case (clblastdatabaseerror); errcode = 'clblastDatabaseError'
1811 case (clblastunknownerror); errcode = 'clblastUnknownError'
1812 case (clblastunexpectederror); errcode = 'clblastUnexpectedError'
1813#endif
1814
1815 case default
1816 write(errcode, '(i10)') ierr
1817 errcode = 'UNKNOWN ERROR CODE ('//trim(adjustl(errcode))//')'
1818 end select
1819#endif
1820
1821 message(1) = 'Error in calling clblas routine '//trim(name)//' : '//trim(errcode)
1822 call messages_fatal(1)
1824 pop_sub(clblas_print_error)
1825 end subroutine clblas_print_error
1826
1827 ! ----------------------------------------------------
1828 subroutine clfft_print_error(ierr, name)
1829 integer, intent(in) :: ierr
1830 character(len=*), intent(in) :: name
1831
1832 character(len=40) :: errcode
1833
1834 push_sub(clfft_print_error)
1835#ifdef HAVE_CLFFT
1836 select case (ierr)
1837 case (clfft_invalid_global_work_size); errcode = 'CLFFT_INVALID_GLOBAL_WORK_SIZE'
1838 case (clfft_invalid_mip_level); errcode = 'CLFFT_INVALID_MIP_LEVEL'
1839 case (clfft_invalid_buffer_size); errcode = 'CLFFT_INVALID_BUFFER_SIZE'
1840 case (clfft_invalid_gl_object); errcode = 'CLFFT_INVALID_GL_OBJECT'
1841 case (clfft_invalid_operation); errcode = 'CLFFT_INVALID_OPERATION'
1842 case (clfft_invalid_event); errcode = 'CLFFT_INVALID_EVENT'
1843 case (clfft_invalid_event_wait_list); errcode = 'CLFFT_INVALID_EVENT_WAIT_LIST'
1844 case (clfft_invalid_global_offset); errcode = 'CLFFT_INVALID_GLOBAL_OFFSET'
1845 case (clfft_invalid_work_item_size); errcode = 'CLFFT_INVALID_WORK_ITEM_SIZE'
1846 case (clfft_invalid_work_group_size); errcode = 'CLFFT_INVALID_WORK_GROUP_SIZE'
1847 case (clfft_invalid_work_dimension); errcode = 'CLFFT_INVALID_WORK_DIMENSION'
1848 case (clfft_invalid_kernel_args); errcode = 'CLFFT_INVALID_KERNEL_ARGS'
1849 case (clfft_invalid_arg_size); errcode = 'CLFFT_INVALID_ARG_SIZE'
1850 case (clfft_invalid_arg_value); errcode = 'CLFFT_INVALID_ARG_VALUE'
1851 case (clfft_invalid_arg_index); errcode = 'CLFFT_INVALID_ARG_INDEX'
1852 case (clfft_invalid_kernel); errcode = 'CLFFT_INVALID_KERNEL'
1853 case (clfft_invalid_kernel_definition); errcode = 'CLFFT_INVALID_KERNEL_DEFINITION'
1854 case (clfft_invalid_kernel_name); errcode = 'CLFFT_INVALID_KERNEL_NAME'
1855 case (clfft_invalid_program_executable); errcode = 'CLFFT_INVALID_PROGRAM_EXECUTABLE'
1856 case (clfft_invalid_program); errcode = 'CLFFT_INVALID_PROGRAM'
1857 case (clfft_invalid_build_options); errcode = 'CLFFT_INVALID_BUILD_OPTIONS'
1858 case (clfft_invalid_binary); errcode = 'CLFFT_INVALID_BINARY'
1859 case (clfft_invalid_sampler); errcode = 'CLFFT_INVALID_SAMPLER'
1860 case (clfft_invalid_image_size); errcode = 'CLFFT_INVALID_IMAGE_SIZE'
1861 case (clfft_invalid_image_format_descriptor); errcode = 'CLFFT_INVALID_IMAGE_FORMAT_DESCRIPTOR'
1862 case (clfft_invalid_mem_object); errcode = 'CLFFT_INVALID_MEM_OBJECT'
1863 case (clfft_invalid_host_ptr); errcode = 'CLFFT_INVALID_HOST_PTR'
1864 case (clfft_invalid_command_queue); errcode = 'CLFFT_INVALID_COMMAND_QUEUE'
1865 case (clfft_invalid_queue_properties); errcode = 'CLFFT_INVALID_QUEUE_PROPERTIES'
1866 case (clfft_invalid_context); errcode = 'CLFFT_INVALID_CONTEXT'
1867 case (clfft_invalid_device); errcode = 'CLFFT_INVALID_DEVICE'
1868 case (clfft_invalid_platform); errcode = 'CLFFT_INVALID_PLATFORM'
1869 case (clfft_invalid_device_type); errcode = 'CLFFT_INVALID_DEVICE_TYPE'
1870 case (clfft_invalid_value); errcode = 'CLFFT_INVALID_VALUE'
1871 case (clfft_map_failure); errcode = 'CLFFT_MAP_FAILURE'
1872 case (clfft_build_program_failure); errcode = 'CLFFT_BUILD_PROGRAM_FAILURE'
1873 case (clfft_image_format_not_supported); errcode = 'CLFFT_IMAGE_FORMAT_NOT_SUPPORTED'
1874 case (clfft_image_format_mismatch); errcode = 'CLFFT_IMAGE_FORMAT_MISMATCH'
1875 case (clfft_mem_copy_overlap); errcode = 'CLFFT_MEM_COPY_OVERLAP'
1876 case (clfft_profiling_info_not_available); errcode = 'CLFFT_PROFILING_INFO_NOT_AVAILABLE'
1877 case (clfft_out_of_host_memory); errcode = 'CLFFT_OUT_OF_HOST_MEMORY'
1878 case (clfft_out_of_resources); errcode = 'CLFFT_OUT_OF_RESOURCES'
1879 case (clfft_mem_object_allocation_failure); errcode = 'CLFFT_MEM_OBJECT_ALLOCATION_FAILURE'
1880 case (clfft_compiler_not_available); errcode = 'CLFFT_COMPILER_NOT_AVAILABLE'
1881 case (clfft_device_not_available); errcode = 'CLFFT_DEVICE_NOT_AVAILABLE'
1882 case (clfft_device_not_found); errcode = 'CLFFT_DEVICE_NOT_FOUND'
1883 case (clfft_success); errcode = 'CLFFT_SUCCESS'
1884 case (clfft_bugcheck); errcode = 'CLFFT_BUGCHECK'
1885 case (clfft_notimplemented); errcode = 'CLFFT_NOTIMPLEMENTED'
1886 case (clfft_file_not_found); errcode = 'CLFFT_FILE_NOT_FOUND'
1887 case (clfft_file_create_failure); errcode = 'CLFFT_FILE_CREATE_FAILURE'
1888 case (clfft_version_mismatch); errcode = 'CLFFT_VERSION_MISMATCH'
1889 case (clfft_invalid_plan); errcode = 'CLFFT_INVALID_PLAN'
1890 case (clfft_device_no_double); errcode = 'CLFFT_DEVICE_NO_DOUBLE'
1891 case (clfft_endstatus); errcode = 'CLFFT_ENDSTATUS'
1892 case default
1893 write(errcode, '(i10)') ierr
1894 errcode = 'UNKNOWN ERROR CODE ('//trim(adjustl(errcode))//')'
1895 end select
1896#endif
1897
1898 message(1) = 'clfft '//trim(name)//' '//trim(errcode)
1899 call messages_fatal(1)
1900
1901 pop_sub(clfft_print_error)
1902 end subroutine clfft_print_error
1903
1904 ! ----------------------------------------------------
1905
#ifdef HAVE_OPENCL
  !> Check whether an OpenCL device advertises a given extension.
  !!
  !! Queries CL_DEVICE_EXTENSIONS and searches the returned space-separated
  !! list for the extension name.
  !!
  !! \param device     OpenCL device to query.
  !! \param extension  Extension name to look for (e.g. "cl_khr_fp64").
  logical function f90_cl_device_has_extension(device, extension) result(has)
    type(cl_device_id), intent(inout) :: device
    character(len=*), intent(in) :: extension

    integer :: cl_status
    character(len=2048) :: all_extensions

    ! Initialize the buffer so the search below is well-defined even if the
    ! device query fails (cl_status is deliberately not checked here).
    all_extensions = ''
    ! The redundant inner #ifdef HAVE_OPENCL guard was removed: this whole
    ! function is only compiled when HAVE_OPENCL is defined.
    call clgetdeviceinfo(device, cl_device_extensions, all_extensions, cl_status)

    has = index(all_extensions, extension) /= 0

  end function f90_cl_device_has_extension
#endif
1922
1923 ! ----------------------------------------------------
1924
  !> Fill a range of a device buffer with a repeated byte value.
  !!
  !! \param buffer  Device buffer to fill.
  !! \param type    Element type stored in the buffer; its size converts the
  !!                element counts and offsets below to bytes.
  !! \param val     Byte value written to every byte of the range.
  !! \param nval    Number of elements (not bytes) to set; 0 is a no-op.
  !! \param offset  Optional element offset of the first element to set;
  !!                an offset past the end of the buffer makes this a no-op.
  !! \param async   If .true., do not synchronize with the device on return.
  subroutine accel_set_buffer_to(buffer, type, val, nval, offset, async)
    type(accel_mem_t), intent(inout) :: buffer
    type(type_t), intent(in) :: type
    integer(int8), intent(in) :: val
    integer(int64), intent(in) :: nval
    integer(int64), optional, intent(in) :: offset
    logical, optional, intent(in) :: async

#ifdef HAVE_OPENCL
    integer :: ierr
#endif
    integer(int64) :: nval_, offset_, type_size

    push_sub(accel_set_buffer_to)

    ! Nothing to do for an empty range.
    if (nval == 0) then
      pop_sub(accel_set_buffer_to)
      return
    end if
    assert(nval > 0)

    ! An offset beyond the buffer is treated as a silent no-op.
    if (present(offset)) then
      assert(offset >= 0)
      if(offset > buffer%size) then
        pop_sub(accel_set_buffer_to)
        return
      end if
    end if

    ! Convert element counts to byte counts for the low-level fill calls.
    type_size = types_get_size(type)
    nval_ = nval*type_size

    offset_ = 0_int64
    if (present(offset)) offset_ = offset*type_size

#ifdef HAVE_OPENCL
    call clenqueuefillbuffer(accel%command_queue, buffer%mem, val, offset_, nval_, ierr)
    if (ierr /= cl_success) call opencl_print_error(ierr, "clEnqueueFillBuffer")
#else
    call cuda_mem_set_async(buffer%mem, val, nval_, offset_)
#endif
    ! By default synchronize, so the data is guaranteed set on return.
    if(.not. optional_default(async, .false.)) call accel_finish()

    pop_sub(accel_set_buffer_to)
  end subroutine accel_set_buffer_to
1971
1972 ! ----------------------------------------------------
1973
1974 subroutine accel_set_buffer_to_zero_i8(buffer, type, nval, offset, async)
1975 type(accel_mem_t), intent(inout) :: buffer
1976 type(type_t), intent(in) :: type
1977 integer(int64), intent(in) :: nval
1978 integer(int64), optional, intent(in) :: offset
1979 logical, optional, intent(in) :: async
1980
1982
1983 call accel_set_buffer_to(buffer, type, int(z'00', int8), nval, offset, async)
1984
1986 end subroutine accel_set_buffer_to_zero_i8
1987
1988 ! ----------------------------------------------------
1989
1990 subroutine accel_set_buffer_to_zero_i4(buffer, type, nval, offset, async)
1991 type(accel_mem_t), intent(inout) :: buffer
1992 type(type_t), intent(in) :: type
1993 integer(int32), intent(in) :: nval
1994 integer(int32), optional, intent(in) :: offset
1995 logical, optional, intent(in) :: async
1996
1998
1999 if (present(offset)) then
2000 call accel_set_buffer_to_zero_i8(buffer, type, int(nval, int64), int(offset, int64), async=async)
2001 else
2002 call accel_set_buffer_to_zero_i8(buffer, type, int(nval, int64), async=async)
2003 end if
2004
2006 end subroutine accel_set_buffer_to_zero_i4
2007
2008 ! ----------------------------------------------------
2009
  !> Benchmark host<->device transfer bandwidth and print a table of write
  !! and read rates for exponentially growing buffer sizes.
  subroutine opencl_check_bandwidth()
    integer :: itime
    integer, parameter :: times = 10  ! repetitions averaged per buffer size
    integer :: size  ! NOTE(review): shadows the intrinsic size(); element count
    real(real64) :: time, stime
    real(real64) :: read_bw, write_bw
    type(accel_mem_t) :: buff
    real(real64), allocatable :: data(:)

    call messages_new_line()
    call messages_write('Info: Benchmarking the bandwidth between main memory and device memory')
    call messages_new_line()
    call messages_info()

    call messages_write(' Buffer size Read bw Write bw')
    call messages_new_line()
    call messages_write(' [MiB] [MiB/s] [MiB/s]')
    call messages_info()

    size = 15000
    do
      ! data is transferred without being initialized; only timing matters.
      safe_allocate(data(1:size))
      call accel_create_buffer(buff, accel_mem_read_write, type_float, size)

      ! Host -> device: synchronize after every write so each full transfer
      ! is included in the timing.
      stime = loct_clock()
      do itime = 1, times
        call accel_write_buffer(buff, size, data)
        call accel_finish()
      end do
      time = (loct_clock() - stime)/real(times, real64)

      ! 8 bytes per real64 element.
      write_bw = real(size, real64) *8.0_real64/time

      ! Device -> host: a single accel_finish after the loop, unlike the
      ! write loop above -- TODO confirm this asymmetry is intended.
      stime = loct_clock()
      do itime = 1, times
        call accel_read_buffer(buff, size, data)
      end do
      call accel_finish()

      time = (loct_clock() - stime)/real(times, real64)
      read_bw = real(size, real64) *8.0_real64/time

      ! Report size in MiB and bandwidths in MiB/s.
      call messages_write(size*8.0_real64/1024.0_real64**2)
      call messages_write(write_bw/1024.0_real64**2, fmt = '(f10.1)')
      call messages_write(read_bw/1024.0_real64**2, fmt = '(f10.1)')
      call messages_info()

      call accel_release_buffer(buff)

      safe_deallocate_a(data)

      ! Double the size each iteration, up to the ~5e7-element cap below.
      size = int(size*2.0)

      if (size > 50000000) exit
    end do
  end subroutine opencl_check_bandwidth
2066
2067 ! ----------------------------------------------------
2068
  !> Whether shared (local) device memory is in use, as recorded in the
  !! global accel state.
  logical pure function accel_use_shared_mem() result(use_shared_mem)

    use_shared_mem = accel%shared_mem

  end function accel_use_shared_mem
2074
2075 !------------------------------------------------------------
2076
2077 subroutine accel_kernel_global_init()
2078
2079 push_sub(accel_kernel_global_init)
2080
2081 nullify(head)
2082
2083 call cuda_module_map_init(accel%module_map)
2084
2086 end subroutine accel_kernel_global_init
2087
2088 !------------------------------------------------------------
2089
2090 subroutine accel_kernel_global_end()
2091 type(accel_kernel_t), pointer :: next_head
2092
2093 push_sub(accel_kernel_global_end)
2094
2095 do
2096 if (.not. associated(head)) exit
2097 next_head => head%next
2099 head => next_head
2100 end do
2101
2102 if (accel_is_enabled()) then
2103 call cuda_module_map_end(accel%module_map)
2104 end if
2107 end subroutine accel_kernel_global_end
2108
2109 !------------------------------------------------------------
2110
2111 subroutine accel_kernel_build(this, file_name, kernel_name, flags)
2112 type(accel_kernel_t), intent(inout) :: this
2113 character(len=*), intent(in) :: file_name
2114 character(len=*), intent(in) :: kernel_name
2115 character(len=*), optional, intent(in) :: flags
2116
2117#ifdef HAVE_OPENCL
2118 type(cl_program) :: prog
2119#endif
2120#ifdef HAVE_CUDA
2121 character(len=1000) :: all_flags
2122#endif
2123
2124 push_sub(accel_kernel_build)
2125
2126 call profiling_in("ACCEL_COMPILE", exclude = .true.)
2128#ifdef HAVE_CUDA
2129 all_flags = '-I'//trim(conf%share)//'/opencl/'//" "//trim(accel%debug_flag)
2130
2131 if (accel_use_shared_mem()) then
2132 all_flags = trim(all_flags)//' -DSHARED_MEM'
2133 end if
2134
2135 if (present(flags)) then
2136 all_flags = trim(all_flags)//' '//trim(flags)
2137 end if
2138
2139 call cuda_build_program(accel%module_map, this%cuda_module, accel%device%cuda_device, &
2140 string_f_to_c(trim(file_name)), string_f_to_c(trim(all_flags)))
2141
2142 call cuda_create_kernel(this%cuda_kernel, this%cuda_module, string_f_to_c(trim(kernel_name)))
2143 call cuda_alloc_arg_array(this%arguments)
2144
2145 this%cuda_shared_mem = 0
2146#endif
2147
2148#ifdef HAVE_OPENCL
2149 call opencl_build_program(prog, trim(conf%share)//'/opencl/'//trim(file_name), flags = flags)
2150 call opencl_create_kernel(this%kernel, prog, trim(kernel_name))
2151 call opencl_release_program(prog)
2152#endif
2153
2154 this%initialized = .true.
2155 this%kernel_name = trim(kernel_name)
2156
2157 call profiling_out("ACCEL_COMPILE")
2158
2159 pop_sub(accel_kernel_build)
2160 end subroutine accel_kernel_build
2161
2162 !------------------------------------------------------------
2163
  !> Release the device resources held by a kernel object and mark it as
  !! uninitialized (so a later accel_kernel_start_call would rebuild it).
  subroutine accel_kernel_end(this)
    type(accel_kernel_t), intent(inout) :: this
#ifdef HAVE_OPENCL
    integer :: ierr
#endif

    push_sub(accel_kernel_end)

#ifdef HAVE_CUDA
    call cuda_free_arg_array(this%arguments)
    call cuda_release_kernel(this%cuda_kernel)
    ! modules are not released here, since they are not associated to a kernel
#endif

#ifdef HAVE_OPENCL
    call clreleasekernel(this%kernel, ierr)
    if (ierr /= cl_success) call opencl_print_error(ierr, "release_kernel")
#endif
    this%initialized = .false.

    pop_sub(accel_kernel_end)
  end subroutine accel_kernel_end
2186
2187 !------------------------------------------------------------
2188
2189 subroutine accel_kernel_start_call(this, file_name, kernel_name, flags)
2190 type(accel_kernel_t), target, intent(inout) :: this
2191 character(len=*), intent(in) :: file_name
2192 character(len=*), intent(in) :: kernel_name
2193 character(len=*), optional, intent(in) :: flags
2194
2196
2197 if (.not. this%initialized) then
2198 call accel_kernel_build(this, file_name, kernel_name, flags)
2199 this%next => head
2200 head => this
2201 end if
2202
2204 end subroutine accel_kernel_start_call
2205
2206 !--------------------------------------------------------------
2207
  !> Global memory size of the accelerator device, in bytes, as recorded
  !! in the global accel state.
  integer(int64) pure function accel_global_memory_size() result(size)

    size = accel%global_memory_size

  end function accel_global_memory_size
2213
2214 !--------------------------------------------------------------
2215
  !> Local (shared) memory size of the accelerator device, in bytes, as
  !! recorded in the global accel state.
  integer(int64) pure function accel_local_memory_size() result(size)

    size = accel%local_memory_size

  end function accel_local_memory_size
2221
2222 !--------------------------------------------------------------
2223
2224 integer pure function accel_max_size_per_dim(dim) result(size)
2225 integer, intent(in) :: dim
2226
2227 size = 0
2228#ifdef HAVE_OPENCL
2229 size = 32768 ! Setting here arbitrarily higher dimensions to 32768, as 2**30 leads to a
2230 ! value of zero when multiplied by 2048 and converted to integer 4.
2231 if (dim == 1) size = 2**30
2232#endif
2233#ifdef HAVE_CUDA
2234 size = 32768
2235 if (dim == 1) size = 2**30
2236#endif
2237 end function accel_max_size_per_dim
2238
2239 ! ------------------------------------------------------
2240
  !> Select the current CUDA stream and attach the cuBLAS handle to it.
  !! A no-op when acceleration is disabled or CUDA is not compiled in.
  !!
  !! \param stream_number  Index of the stream to make current.
  subroutine accel_set_stream(stream_number)
    integer, intent(in) :: stream_number

    push_sub(accel_set_stream)

    if (accel_is_enabled()) then
#ifdef HAVE_CUDA
      call cuda_set_stream(accel%cuda_stream, stream_number)
      ! Keep cuBLAS operations on the newly selected stream.
      call cublas_set_stream(accel%cublas_handle, accel%cuda_stream)
#endif
    end if

    pop_sub(accel_set_stream)
  end subroutine accel_set_stream
2255
2256 ! ------------------------------------------------------
2257
  !> Retrieve the number of the current CUDA stream.
  !! NOTE(review): stream_number is left unchanged when acceleration is
  !! disabled or CUDA is not compiled in -- hence the intent(inout).
  subroutine accel_get_stream(stream_number)
    integer, intent(inout) :: stream_number

    push_sub(accel_get_stream)

    if (accel_is_enabled()) then
#ifdef HAVE_CUDA
      call cuda_get_stream(stream_number)
#endif
    end if

    pop_sub(accel_get_stream)
  end subroutine accel_get_stream
2271
2272 ! ------------------------------------------------------
2273
2276
2277 if (accel_is_enabled()) then
2278#ifdef HAVE_CUDA
2279 call cuda_synchronize_all_streams()
2280#endif
2281 end if
2282
2284 end subroutine accel_synchronize_all_streams
2285
2286 function daccel_get_pointer_with_offset(buffer, offset) result(buffer_offset)
2287 type(c_ptr), intent(in) :: buffer
2288 integer(int64), intent(in) :: offset
2289 type(c_ptr) :: buffer_offset
2290
2292#ifdef HAVE_CUDA
2293 call cuda_get_pointer_with_offset(buffer, offset, buffer_offset)
2294#else
2295 ! this is needed to make the compiler happy for non-GPU compilations
2296 buffer_offset = buffer
2297#endif
2300
2301 function zaccel_get_pointer_with_offset(buffer, offset) result(buffer_offset)
2302 type(c_ptr), intent(in) :: buffer
2303 integer(int64), intent(in) :: offset
2304 type(c_ptr) :: buffer_offset
2305
2307#ifdef HAVE_CUDA
2308 call cuda_get_pointer_with_offset(buffer, 2_int64*offset, buffer_offset)
2309#else
2310 ! this is needed to make the compiler happy for non-GPU compilations
2311 buffer_offset = buffer
2312#endif
2315
  !> Clean up a device pointer via cuda_clean_pointer -- presumably one
  !! obtained from the *accel_get_pointer_with_offset functions; TODO
  !! confirm against callers. A no-op without CUDA.
  subroutine accel_clean_pointer(buffer)
    type(c_ptr), intent(in) :: buffer

    push_sub(accel_clean_pointer)
#ifdef HAVE_CUDA
    call cuda_clean_pointer(buffer)
#endif
    pop_sub(accel_clean_pointer)
  end subroutine accel_clean_pointer
2325
2329 subroutine accel_get_unfolded_size(size, grid_size, thread_block_size)
2330 integer(int64), intent(in) :: size
2331 integer(int64), intent(out) :: grid_size
2332 integer(int64), intent(out) :: thread_block_size
2333
2334 push_sub(accel_get_unfolded_size)
2335#ifdef __HIP_PLATFORM_AMD__
2336 ! not benefitial for AMD chips
2337 grid_size = size
2338 thread_block_size = size
2339#else
2340 grid_size = size * accel%warp_size
2341 thread_block_size = accel%warp_size
2342#endif
2344 end subroutine accel_get_unfolded_size
2345
2346#include "undef.F90"
2347#include "real.F90"
2348#include "accel_inc.F90"
2349
2350#include "undef.F90"
2351#include "complex.F90"
2352#include "accel_inc.F90"
2353
2354#include "undef.F90"
2355#include "integer.F90"
2356#include "accel_inc.F90"
2357
2358#include "undef.F90"
2359#include "integer8.F90"
2360#include "accel_inc.F90"
2361
2362end module accel_oct_m
2363
2364!! Local Variables:
2365!! mode: f90
2366!! coding: utf-8
2367!! End:
subroutine device_info()
Definition: accel.F90:688
subroutine laccel_get_device_pointer_3l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:4340
integer, parameter opencl_accelerator
Definition: accel.F90:394
type(accel_kernel_t), target, save, public kernel_density_real
Definition: accel.F90:292
subroutine zaccel_get_device_pointer_2l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2873
integer, parameter opencl_default
Definition: accel.F90:394
type(accel_kernel_t), target, save, public kernel_vpsi_complex
Definition: accel.F90:279
type(accel_kernel_t), target, save, public dkernel_batch_axpy
Definition: accel.F90:300
subroutine, public accel_clean_pointer(buffer)
Definition: accel.F90:1495
subroutine accel_kernel_global_end()
Definition: accel.F90:1320
subroutine zaccel_write_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:2417
subroutine, public accel_get_unfolded_size(size, grid_size, thread_block_size)
Get unfolded size: some kernels (e.g. projectors) unfold the array across warps as an optimization....
Definition: accel.F90:1508
subroutine laccel_read_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:4080
subroutine iaccel_write_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:3108
pure logical function, public accel_allow_cpu_only()
Definition: accel.F90:429
subroutine daccel_get_device_pointer_1(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2060
logical pure function, public accel_use_shared_mem()
Definition: accel.F90:1299
subroutine zaccel_get_device_pointer_1(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2782
subroutine zaccel_create_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:2918
subroutine laccel_write_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:3862
subroutine daccel_write_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:1796
type(accel_kernel_t), target, save, public kernel_vpsi_spinors
Definition: accel.F90:280
subroutine laccel_get_device_pointer_1(host_pointer, device_pointer, dimensions)
Definition: accel.F90:4227
subroutine zaccel_read_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:2546
subroutine zaccel_write_buffer_single(this, data, async)
Definition: accel.F90:2315
subroutine daccel_read_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:1882
type(accel_kernel_t), target, save, public kernel_ghost_reorder
Definition: accel.F90:291
subroutine iaccel_get_device_pointer_3l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:3617
subroutine zaccel_read_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:2604
subroutine laccel_get_device_pointer_2(host_pointer, device_pointer, dimensions)
Definition: accel.F90:4251
subroutine iaccel_write_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:3240
type(accel_kernel_t), target, save, public zkernel_batch_axpy
Definition: accel.F90:301
subroutine zaccel_read_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:2635
integer, parameter cl_plat_nvidia
Definition: accel.F90:401
subroutine iaccel_write_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:3089
subroutine zaccel_release_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:2945
subroutine iaccel_write_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:3163
subroutine, public accel_kernel_start_call(this, file_name, kernel_name, flags)
Definition: accel.F90:1376
subroutine iaccel_write_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:3139
subroutine iaccel_release_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:3667
subroutine iaccel_read_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:3458
subroutine zaccel_write_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:2479
integer, parameter cl_plat_ati
Definition: accel.F90:401
subroutine, public accel_get_stream(stream_number)
Definition: accel.F90:1437
subroutine accel_create_buffer_4(this, flags, type, size, set_zero, async)
Definition: accel.F90:853
subroutine zaccel_read_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:2678
integer(int64) pure function, public accel_global_memory_size()
Definition: accel.F90:1395
subroutine laccel_write_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:3812
type(accel_kernel_t), target, save, public zkernel_ax_function_py
Definition: accel.F90:303
subroutine daccel_read_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:1863
subroutine daccel_write_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:1664
subroutine zaccel_set_kernel_arg_data(kernel, narg, data)
Definition: accel.F90:2765
subroutine daccel_get_device_pointer_3l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2173
subroutine iaccel_read_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:3357
subroutine accel_set_kernel_arg_local(kernel, narg, type, size)
Definition: accel.F90:1022
subroutine daccel_get_device_pointer_2l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2151
integer(int64) function accel_padded_size_i8(nn)
Definition: accel.F90:823
subroutine laccel_read_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:3991
subroutine daccel_write_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:1606
subroutine iaccel_create_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:3640
subroutine zaccel_read_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:2659
subroutine iaccel_get_device_pointer_1l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:3572
subroutine, public accel_finish()
Definition: accel.F90:985
subroutine opencl_check_bandwidth()
Definition: accel.F90:1240
subroutine accel_kernel_global_init()
Definition: accel.F90:1307
subroutine zaccel_read_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:2736
subroutine zaccel_write_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:2367
subroutine iaccel_get_device_pointer_1(host_pointer, device_pointer, dimensions)
Definition: accel.F90:3504
subroutine laccel_create_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:4363
subroutine, public accel_ensure_buffer_size(buffer, flags, type, required_size, set_zero, async)
Definition: accel.F90:957
type(accel_kernel_t), target, save, public zzmul
Definition: accel.F90:307
subroutine accel_set_buffer_to(buffer, type, val, nval, offset, async)
Definition: accel.F90:1155
subroutine laccel_set_kernel_arg_data(kernel, narg, data)
Definition: accel.F90:4210
subroutine daccel_write_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:1645
subroutine daccel_read_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:1937
subroutine zaccel_read_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:2585
subroutine daccel_read_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:1913
subroutine laccel_write_buffer_single(this, data, async)
Definition: accel.F90:3760
subroutine daccel_get_device_pointer_2(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2084
subroutine iaccel_read_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:3419
subroutine zaccel_write_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:2441
subroutine zaccel_write_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:2460
subroutine accel_set_buffer_to_zero_i8(buffer, type, nval, offset, async)
Definition: accel.F90:1204
subroutine zaccel_get_device_pointer_1l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2850
logical pure function, public accel_buffer_is_allocated(this)
Definition: accel.F90:977
integer, parameter, public accel_mem_read_write
Definition: accel.F90:196
subroutine, public clfft_print_error(ierr, name)
Definition: accel.F90:1137
subroutine daccel_create_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:2196
subroutine accel_kernel_end(this)
Definition: accel.F90:1361
type(accel_kernel_t), target, save, public dkernel_ax_function_py
Definition: accel.F90:302
subroutine laccel_write_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:3963
subroutine zaccel_get_device_pointer_2(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2806
subroutine daccel_write_buffer_3(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:1695
subroutine laccel_read_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:4142
type(c_ptr) function, public daccel_get_pointer_with_offset(buffer, offset)
Definition: accel.F90:1465
subroutine iaccel_write_buffer_single(this, data, async)
Definition: accel.F90:3037
subroutine iaccel_get_device_pointer_2(host_pointer, device_pointer, dimensions)
Definition: accel.F90:3528
integer pure function, public accel_max_size_per_dim(dim)
Definition: accel.F90:1411
subroutine iaccel_read_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:3268
subroutine daccel_read_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:1824
type(accel_kernel_t), target, save, public dzmul
Definition: accel.F90:306
subroutine iaccel_read_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:3400
subroutine laccel_write_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:3886
subroutine zaccel_write_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:2386
subroutine laccel_read_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:4104
subroutine laccel_get_device_pointer_2l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:4318
subroutine iaccel_get_device_pointer_3(host_pointer, device_pointer, dimensions)
Definition: accel.F90:3550
subroutine zaccel_write_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:2518
subroutine daccel_set_kernel_arg_data(kernel, narg, data)
Definition: accel.F90:2043
subroutine iaccel_read_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:3307
subroutine accel_kernel_run_8(kernel, globalsizes, localsizes)
Definition: accel.F90:1052
type(accel_kernel_t), target, save, public kernel_vpsi_spinors_complex
Definition: accel.F90:281
subroutine, public accel_kernel_build(this, file_name, kernel_name, flags)
Definition: accel.F90:1341
subroutine, public accel_init(base_grp, namespace)
Definition: accel.F90:439
subroutine, public accel_end(namespace)
Definition: accel.F90:746
subroutine laccel_write_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:3773
subroutine, public accel_synchronize_all_streams()
Definition: accel.F90:1453
subroutine, public accel_set_stream(stream_number)
Definition: accel.F90:1420
subroutine, public accel_release_buffer(this, async)
Definition: accel.F90:919
subroutine laccel_read_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:4049
type(accel_kernel_t), target, save, public zunpack
Definition: accel.F90:290
subroutine daccel_release_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:2223
subroutine laccel_get_device_pointer_1l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:4295
subroutine iaccel_read_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:3381
integer, parameter cl_plat_amd
Definition: accel.F90:401
integer(int32) function accel_padded_size_i4(nn)
Definition: accel.F90:844
subroutine accel_set_buffer_to_zero_i4(buffer, type, nval, offset, async)
Definition: accel.F90:1220
subroutine laccel_write_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:3905
subroutine iaccel_get_device_pointer_2l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:3595
subroutine iaccel_write_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:3201
pure logical function, public accel_is_enabled()
Definition: accel.F90:419
subroutine daccel_write_buffer_0_int32(this, n1, data, offset, async)
Definition: accel.F90:1719
subroutine daccel_write_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:1757
integer, parameter cl_plat_intel
Definition: accel.F90:401
subroutine iaccel_write_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:3182
integer, parameter, public accel_mem_write_only
Definition: accel.F90:196
subroutine daccel_read_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:2014
subroutine laccel_read_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:4123
subroutine daccel_read_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:1956
type(accel_kernel_t), target, save, public kernel_vpsi
Definition: accel.F90:278
subroutine daccel_get_device_pointer_1l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2128
subroutine accel_kernel_run_4(kernel, globalsizes, localsizes)
Definition: accel.F90:1088
subroutine laccel_release_blas_alpha_beta_buffer(this, data, async)
Definition: accel.F90:4390
subroutine iaccel_read_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:3326
subroutine laccel_write_buffer_2(this, n1, n2, data, offset, async)
Definition: accel.F90:3831
subroutine laccel_write_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:3924
subroutine zaccel_get_device_pointer_3(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2828
type(c_ptr) function, public zaccel_get_pointer_with_offset(buffer, offset)
Definition: accel.F90:1480
subroutine laccel_read_buffer_3_int32(this, n1, n2, n3, data, offset, async)
Definition: accel.F90:4181
subroutine daccel_write_buffer_single(this, data, async)
Definition: accel.F90:1593
subroutine daccel_write_buffer_1_int32(this, n1, data, offset, async)
Definition: accel.F90:1738
integer function, public accel_kernel_workgroup_size(kernel)
Definition: accel.F90:1105
integer, parameter opencl_cpu
Definition: accel.F90:394
subroutine zaccel_write_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:2328
subroutine, public clblas_print_error(ierr, name)
Definition: accel.F90:1122
type(accel_t), public accel
Definition: accel.F90:271
subroutine laccel_get_device_pointer_3(host_pointer, device_pointer, dimensions)
Definition: accel.F90:4273
subroutine iaccel_set_kernel_arg_data(kernel, narg, data)
Definition: accel.F90:3487
subroutine daccel_read_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:1975
subroutine iaccel_write_buffer_0(this, n1, data, offset, async)
Definition: accel.F90:3050
subroutine zaccel_get_device_pointer_3l(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2895
subroutine accel_create_buffer_8(this, flags, type, size, set_zero, async)
Definition: accel.F90:866
subroutine laccel_read_buffer_1(this, n1, data, offset, async)
Definition: accel.F90:4030
type(accel_kernel_t), target, save, public dunpack
Definition: accel.F90:289
integer(int64) pure function, public accel_local_memory_size()
Definition: accel.F90:1403
subroutine accel_set_kernel_arg_buffer(kernel, narg, buffer)
Definition: accel.F90:1005
integer pure function, public accel_max_workgroup_size()
Definition: accel.F90:1099
subroutine zaccel_read_buffer_2_int32(this, n1, n2, data, offset, async)
Definition: accel.F90:2697
subroutine daccel_get_device_pointer_3(host_pointer, device_pointer, dimensions)
Definition: accel.F90:2106
type(accel_kernel_t), pointer head
Definition: accel.F90:413
subroutine, public alloc_cache_put(alloc_cache, size, loc, put)
subroutine, public alloc_cache_get(alloc_cache, size, found, loc)
integer(int64), parameter, public alloc_cache_any_size
real(real64), parameter, public m_zero
Definition: global.F90:191
complex(real64), parameter, public m_z0
Definition: global.F90:201
complex(real64), parameter, public m_z1
Definition: global.F90:202
real(real64), parameter, public m_one
Definition: global.F90:192
subroutine, public loct_sysname(name)
Definition: loct.F90:321
This module is intended to contain "only mathematical" functions and procedures.
Definition: math.F90:117
subroutine, public messages_print_with_emphasis(msg, iunit, namespace)
Definition: messages.F90:898
character(len=512), private msg
Definition: messages.F90:167
subroutine, public messages_warning(no_lines, all_nodes, namespace)
Definition: messages.F90:525
subroutine, public messages_obsolete_variable(namespace, name, rep)
Definition: messages.F90:1023
subroutine, public messages_new_line()
Definition: messages.F90:1112
character(len=256), dimension(max_lines), public message
to be output by fatal, warning
Definition: messages.F90:162
subroutine, public messages_fatal(no_lines, only_root_writes, namespace)
Definition: messages.F90:410
subroutine, public messages_input_error(namespace, var, details, row, column)
Definition: messages.F90:691
subroutine, public messages_info(no_lines, iunit, debug_only, stress, all_nodes, namespace)
Definition: messages.F90:594
subroutine, public profiling_out(label)
Increment out counter and sum up difference between entry and exit time.
Definition: profiling.F90:625
subroutine, public profiling_in(label, exclude)
Increment in counter and save entry time.
Definition: profiling.F90:554
type(type_t), public type_float
Definition: types.F90:135
type(type_t), public type_cmplx
Definition: types.F90:136
integer pure function, public types_get_size(this)
Definition: types.F90:154
This module defines the unit system, used for input and output.
type(unit_t), public unit_gigabytes
For larger amounts of data (natural code units are bytes)
type(unit_t), public unit_megabytes
For large amounts of data (natural code units are bytes)
type(unit_t), public unit_kilobytes
For small amounts of data (natural code units are bytes)
int true(void)