diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 3df6b991..99fb890a 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -8,16 +8,28 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && DEBIAN_FRONTEND=noninter
     python3 \
     ripgrep \
     git \
-    ltrace
+    ltrace \
+    # required by llvm 17
+    lsb-release software-properties-common gnupg
+
+ARG LLVM_VERSION=17
+RUN wget https://apt.llvm.org/llvm.sh && \
+    chmod +x llvm.sh && \
+    ./llvm.sh ${LLVM_VERSION}
 
 # Feel free to change to a newer version if you have a newer version on your host
-ARG CUDA_VERSION=12-4
+ARG CUDA_PKG_VERSION=12-4
 # Docker <-> host driver version compatibility is newer host <-> older docker
 # We don't care about a specific driver version, so pick oldest 5XX
 ARG CUDA_DRIVER=515
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+    # CUDA headers need these for interop
+    libgl-dev libegl-dev libvdpau-dev \
     nvidia-utils-${CUDA_DRIVER} \
-    cuda-cudart-${CUDA_VERSION}
+    cuda-cudart-dev-${CUDA_PKG_VERSION} \
+    cuda-cudart-${CUDA_PKG_VERSION} \
+    cuda-profiler-api-${CUDA_PKG_VERSION} \
+    cuda-nvcc-${CUDA_PKG_VERSION}
 
 ARG ROCM_VERSION=6.2.2
 RUN mkdir --parents --mode=0755 /etc/apt/keyrings && \
@@ -29,9 +41,11 @@ RUN mkdir --parents --mode=0755 /etc/apt/keyrings && \
     rocminfo \
     rocm-gdb \
     rocm-smi-lib \
-    hip-runtime-amd && \
+    rocm-llvm-dev \
+    hip-runtime-amd \
+    hip-dev && \
     echo '/opt/rocm/lib' > /etc/ld.so.conf.d/rocm.conf && \
     ldconfig
-ENV PATH=$PATH:/opt/rocm-6.2.2/bin
+ENV PATH=$PATH:/opt/rocm-${ROCM_VERSION}/bin
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 7cae35a0..34e88fbc 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -28,7 +28,7 @@
     //"hostRequirements": { "gpu": "optional" }
     "customizations": {
         "vscode": {
-            "extensions": [ "mhutchie.git-graph" ],
+            "extensions": [ "mhutchie.git-graph" ]
         }
-}
+    }
 }
diff --git a/.gitmodules b/.gitmodules
index e710202a..a4e5dc5e 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,10 +1,3 @@
-[submodule "ext/spirv-tools"]
-	path = ext/spirv-tools
-	url = https://github.com/KhronosGroup/SPIRV-Tools
-	branch = master
-[submodule "ext/spirv-headers"]
-	path = ext/spirv-headers
-	url = https://github.com/KhronosGroup/SPIRV-Headers
 [submodule "ext/llvm-project"]
 	path = ext/llvm-project
 	url = https://github.com/llvm/llvm-project
diff --git a/Cargo.toml b/Cargo.toml
index 8a7467a5..2e8edcd7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,27 +1,24 @@
-[workspace]
-
-resolver = "2"
-
-members = [
-    "ext/hip_runtime-sys",
-    "ext/amd_comgr-sys",
-    "comgr",
-    "cuda_base",
-    "cuda_types",
-    "detours-sys",
-    "level_zero-sys",
-    "level_zero",
-    "spirv_tools-sys",
-    "zluda",
-    "zluda_dump",
-    "zluda_lib",
-    "zluda_inject",
-    "zluda_redirect",
-    "zluda_ml",
-    "ptx",
-    "ptx_parser",
-    "ptx_parser_macros",
-    "ptx_parser_macros_impl",
-]
-
-default-members = ["zluda_lib", "zluda_ml", "zluda_inject", "zluda_redirect"]
+[workspace]
+
+resolver = "2"
+
+members = [
+    "ext/hip_runtime-sys",
+    "ext/amd_comgr-sys",
+    "comgr",
+    "cuda_base",
+    "cuda_types",
+    "detours-sys",
+    "zluda",
+    "zluda_dump",
+    "zluda_inject",
+    "zluda_redirect",
+    "zluda_ml",
+    "ptx",
+    "ptx_parser",
+    "ptx_parser_macros",
+    "ptx_parser_macros_impl",
+    "zluda_bindgen",
+]
+
+default-members = ["zluda", "zluda_ml", "zluda_inject", "zluda_redirect"]
diff --git
a/cuda_base/Cargo.toml b/cuda_base/Cargo.toml index 997a426b..9c9d531a 100644 --- a/cuda_base/Cargo.toml +++ b/cuda_base/Cargo.toml @@ -2,11 +2,11 @@ name = "cuda_base" version = "0.0.0" authors = ["Andrzej Janik "] -edition = "2018" +edition = "2021" [dependencies] quote = "1.0" -syn = { version = "1.0", features = ["full", "visit-mut"] } +syn = { version = "2.0", features = ["full", "visit-mut"] } proc-macro2 = "1.0" rustc-hash = "1.1.0" diff --git a/cuda_base/README b/cuda_base/README deleted file mode 100644 index 4e0d60f7..00000000 --- a/cuda_base/README +++ /dev/null @@ -1 +0,0 @@ -bindgen build/wrapper.h -o src/cuda.rs --no-partialeq "CUDA_HOST_NODE_PARAMS_st" --with-derive-eq --whitelist-function="^cu.*" --whitelist-var="^CU.*" --size_t-is-usize --default-enum-style=newtype --no-layout-tests --no-doc-comments --no-derive-debug --new-type-alias "^CUdevice_v\d+$|^CUdeviceptr_v\d+$" --must-use-type "cudaError_enum" -- -I/usr/local/cuda/include \ No newline at end of file diff --git a/cuda_base/build/wrapper.h b/cuda_base/build/wrapper.h index beebe108..a5502560 100644 --- a/cuda_base/build/wrapper.h +++ b/cuda_base/build/wrapper.h @@ -1,3 +1,7 @@ #define __CUDA_API_VERSION_INTERNAL #include -#include \ No newline at end of file +#include +#include +#include +#include +#include diff --git a/cuda_base/src/cuda.rs b/cuda_base/src/cuda.rs index 6c141cf9..2cc5a562 100644 --- a/cuda_base/src/cuda.rs +++ b/cuda_base/src/cuda.rs @@ -1,5836 +1,20852 @@ -/* automatically generated by rust-bindgen 0.59.2 */ - -pub const CUDA_VERSION: u32 = 11050; -pub const CU_IPC_HANDLE_SIZE: u32 = 64; -pub const CU_MEMHOSTALLOC_PORTABLE: u32 = 1; -pub const CU_MEMHOSTALLOC_DEVICEMAP: u32 = 2; -pub const CU_MEMHOSTALLOC_WRITECOMBINED: u32 = 4; -pub const CU_MEMHOSTREGISTER_PORTABLE: u32 = 1; -pub const CU_MEMHOSTREGISTER_DEVICEMAP: u32 = 2; -pub const CU_MEMHOSTREGISTER_IOMEMORY: u32 = 4; -pub const CU_MEMHOSTREGISTER_READ_ONLY: u32 = 8; -pub const CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL: u32 = 1; -pub const CUDA_EXTERNAL_MEMORY_DEDICATED: u32 = 1; -pub const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC: u32 = 1; -pub const CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC: u32 = 2; -pub const CUDA_NVSCISYNC_ATTR_SIGNAL: u32 = 1; -pub const CUDA_NVSCISYNC_ATTR_WAIT: u32 = 2; -pub const CU_MEM_CREATE_USAGE_TILE_POOL: u32 = 1; -pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC: u32 = 1; -pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC: u32 = 2; -pub const CUDA_ARRAY3D_LAYERED: u32 = 1; -pub const CUDA_ARRAY3D_2DARRAY: u32 = 1; -pub const CUDA_ARRAY3D_SURFACE_LDST: u32 = 2; -pub const CUDA_ARRAY3D_CUBEMAP: u32 = 4; -pub const CUDA_ARRAY3D_TEXTURE_GATHER: u32 = 8; -pub const CUDA_ARRAY3D_DEPTH_TEXTURE: u32 = 16; -pub const CUDA_ARRAY3D_COLOR_ATTACHMENT: u32 = 32; -pub const CUDA_ARRAY3D_SPARSE: u32 = 64; -pub const CU_TRSA_OVERRIDE_FORMAT: u32 = 1; -pub const CU_TRSF_READ_AS_INTEGER: u32 = 1; -pub const CU_TRSF_NORMALIZED_COORDINATES: u32 = 2; -pub const CU_TRSF_SRGB: u32 = 16; -pub const CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION: u32 = 32; -pub const CU_PARAM_TR_DEFAULT: i32 = -1; -pub const CURRENT_IMPORT_REDIRECTION_VERSION: u32 = 1; -pub const CURVECAPS: u32 = 28; -pub const CURSOR_SHOWING: u32 = 1; -pub const CUR_BLOB_VERSION: u32 = 2; -pub type cuuint32_t = u32; -pub type cuuint64_t = u64; -#[repr(transparent)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUdeviceptr_v2(pub ::std::os::raw::c_ulonglong); -pub type CUdeviceptr = CUdeviceptr_v2; 
-#[repr(transparent)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUdevice_v1(pub ::std::os::raw::c_int); -pub type CUdevice = CUdevice_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUctx_st { - _unused: [u8; 0], -} -pub type CUcontext = *mut CUctx_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmod_st { - _unused: [u8; 0], -} -pub type CUmodule = *mut CUmod_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUfunc_st { - _unused: [u8; 0], -} -pub type CUfunction = *mut CUfunc_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUarray_st { - _unused: [u8; 0], -} -pub type CUarray = *mut CUarray_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmipmappedArray_st { - _unused: [u8; 0], -} -pub type CUmipmappedArray = *mut CUmipmappedArray_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUtexref_st { - _unused: [u8; 0], -} -pub type CUtexref = *mut CUtexref_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUsurfref_st { - _unused: [u8; 0], -} -pub type CUsurfref = *mut CUsurfref_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUevent_st { - _unused: [u8; 0], -} -pub type CUevent = *mut CUevent_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstream_st { - _unused: [u8; 0], -} -pub type CUstream = *mut CUstream_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraphicsResource_st { - _unused: [u8; 0], -} -pub type CUgraphicsResource = *mut CUgraphicsResource_st; -pub type CUtexObject_v1 = ::std::os::raw::c_ulonglong; -pub type CUtexObject = CUtexObject_v1; -pub type CUsurfObject_v1 = ::std::os::raw::c_ulonglong; -pub type CUsurfObject = CUsurfObject_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUextMemory_st { - _unused: [u8; 0], -} -pub type CUexternalMemory = *mut CUextMemory_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUextSemaphore_st { - _unused: [u8; 0], -} -pub type CUexternalSemaphore = *mut CUextSemaphore_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraph_st { - _unused: [u8; 0], -} -pub type CUgraph = *mut CUgraph_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraphNode_st { - _unused: [u8; 0], -} -pub type CUgraphNode = *mut CUgraphNode_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraphExec_st { - _unused: [u8; 0], -} -pub type CUgraphExec = *mut CUgraphExec_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmemPoolHandle_st { - _unused: [u8; 0], -} -pub type CUmemoryPool = *mut CUmemPoolHandle_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUuserObject_st { - _unused: [u8; 0], -} -pub type CUuserObject = *mut CUuserObject_st; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUuuid_st { - pub bytes: [::std::os::raw::c_char; 16usize], -} -pub type CUuuid = CUuuid_st; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUipcEventHandle_st { - pub reserved: [::std::os::raw::c_char; 64usize], -} -pub type CUipcEventHandle_v1 = CUipcEventHandle_st; -pub type CUipcEventHandle = CUipcEventHandle_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUipcMemHandle_st { - pub reserved: [::std::os::raw::c_char; 64usize], -} -pub type CUipcMemHandle_v1 = CUipcMemHandle_st; -pub type CUipcMemHandle = CUipcMemHandle_v1; -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WAIT_VALUE_32: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(1); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WRITE_VALUE_32: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(2); -} -impl CUstreamBatchMemOpType_enum { - pub const 
CU_STREAM_MEM_OP_WAIT_VALUE_64: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(4); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WRITE_VALUE_64: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(5); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUstreamBatchMemOpType_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamBatchMemOpType_enum as CUstreamBatchMemOpType; -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamBatchMemOpParams_union { - pub operation: CUstreamBatchMemOpType, - pub waitValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st, - pub writeValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st, - pub flushRemoteWrites: CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st, - pub pad: [cuuint64_t; 6usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st { - pub operation: CUstreamBatchMemOpType, - pub address: CUdeviceptr, - pub __bindgen_anon_1: - CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub alias: CUdeviceptr, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 { - pub value: cuuint32_t, - pub value64: cuuint64_t, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st { - pub operation: CUstreamBatchMemOpType, - pub address: CUdeviceptr, - pub __bindgen_anon_1: - CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub alias: CUdeviceptr, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 { - pub value: cuuint32_t, - pub value64: cuuint64_t, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st { - pub operation: CUstreamBatchMemOpType, - pub flags: ::std::os::raw::c_uint, -} -pub type CUstreamBatchMemOpParams_v1 = CUstreamBatchMemOpParams_union; -pub type CUstreamBatchMemOpParams = CUstreamBatchMemOpParams_v1; -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNSIGNED_INT8: CUarray_format_enum = CUarray_format_enum(1); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNSIGNED_INT16: CUarray_format_enum = CUarray_format_enum(2); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNSIGNED_INT32: CUarray_format_enum = CUarray_format_enum(3); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SIGNED_INT8: CUarray_format_enum = CUarray_format_enum(8); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SIGNED_INT16: CUarray_format_enum = CUarray_format_enum(9); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SIGNED_INT32: CUarray_format_enum = CUarray_format_enum(10); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_HALF: CUarray_format_enum = CUarray_format_enum(16); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_FLOAT: CUarray_format_enum = CUarray_format_enum(32); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_NV12: CUarray_format_enum = CUarray_format_enum(176); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNORM_INT8X1: 
CUarray_format_enum = CUarray_format_enum(192); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(193); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(194); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(195); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(196); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(197); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(198); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(199); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(200); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(201); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(202); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(203); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC1_UNORM: CUarray_format_enum = CUarray_format_enum(145); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC1_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(146); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC2_UNORM: CUarray_format_enum = CUarray_format_enum(147); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC2_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(148); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC3_UNORM: CUarray_format_enum = CUarray_format_enum(149); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC3_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(150); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC4_UNORM: CUarray_format_enum = CUarray_format_enum(151); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC4_SNORM: CUarray_format_enum = CUarray_format_enum(152); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC5_UNORM: CUarray_format_enum = CUarray_format_enum(153); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC5_SNORM: CUarray_format_enum = CUarray_format_enum(154); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC6H_UF16: CUarray_format_enum = CUarray_format_enum(155); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC6H_SF16: CUarray_format_enum = CUarray_format_enum(156); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC7_UNORM: CUarray_format_enum = CUarray_format_enum(157); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_BC7_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum(158); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUarray_format_enum(pub ::std::os::raw::c_uint); -pub use self::CUarray_format_enum as CUarray_format; -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_WRAP: CUaddress_mode_enum = CUaddress_mode_enum(0); -} -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_CLAMP: CUaddress_mode_enum = CUaddress_mode_enum(1); -} -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_MIRROR: CUaddress_mode_enum = CUaddress_mode_enum(2); -} -impl CUaddress_mode_enum { - pub 
const CU_TR_ADDRESS_MODE_BORDER: CUaddress_mode_enum = CUaddress_mode_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUaddress_mode_enum(pub ::std::os::raw::c_uint); -pub use self::CUaddress_mode_enum as CUaddress_mode; -impl CUfilter_mode_enum { - pub const CU_TR_FILTER_MODE_POINT: CUfilter_mode_enum = CUfilter_mode_enum(0); -} -impl CUfilter_mode_enum { - pub const CU_TR_FILTER_MODE_LINEAR: CUfilter_mode_enum = CUfilter_mode_enum(1); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUfilter_mode_enum(pub ::std::os::raw::c_uint); -pub use self::CUfilter_mode_enum as CUfilter_mode; -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(1); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: CUdevice_attribute_enum = - CUdevice_attribute_enum(2); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: CUdevice_attribute_enum = - CUdevice_attribute_enum(3); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: CUdevice_attribute_enum = - CUdevice_attribute_enum(4); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: CUdevice_attribute_enum = - CUdevice_attribute_enum(5); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: CUdevice_attribute_enum = - CUdevice_attribute_enum(6); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: CUdevice_attribute_enum = - CUdevice_attribute_enum(7); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(8); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(8); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: CUdevice_attribute_enum = - CUdevice_attribute_enum(9); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_WARP_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(10); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum(11); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(12); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(12); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum(13); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: CUdevice_attribute_enum = - CUdevice_attribute_enum(14); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: CUdevice_attribute_enum = - CUdevice_attribute_enum(15); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: CUdevice_attribute_enum = - CUdevice_attribute_enum(16); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: CUdevice_attribute_enum = - CUdevice_attribute_enum(17); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_INTEGRATED: CUdevice_attribute_enum = CUdevice_attribute_enum(18); -} -impl CUdevice_attribute_enum { - pub const 
CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: CUdevice_attribute_enum = - CUdevice_attribute_enum(19); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: CUdevice_attribute_enum = - CUdevice_attribute_enum(20); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(21); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(22); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(23); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(24); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(25); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(26); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(27); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(28); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(29); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(27); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(28); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES: CUdevice_attribute_enum = - CUdevice_attribute_enum(29); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT: CUdevice_attribute_enum = - CUdevice_attribute_enum(30); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: CUdevice_attribute_enum = - CUdevice_attribute_enum(31); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_ECC_ENABLED: CUdevice_attribute_enum = - CUdevice_attribute_enum(32); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(33); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: CUdevice_attribute_enum = - CUdevice_attribute_enum(34); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TCC_DRIVER: CUdevice_attribute_enum = CUdevice_attribute_enum(35); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(36); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(37); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: CUdevice_attribute_enum = - CUdevice_attribute_enum(38); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(39); -} -impl CUdevice_attribute_enum { - pub const 
CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT: CUdevice_attribute_enum = - CUdevice_attribute_enum(40); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: CUdevice_attribute_enum = - CUdevice_attribute_enum(41); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(42); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(43); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER: CUdevice_attribute_enum = - CUdevice_attribute_enum(44); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(45); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(46); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(47); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(48); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(49); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: CUdevice_attribute_enum = - CUdevice_attribute_enum(50); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: CUdevice_attribute_enum = - CUdevice_attribute_enum(51); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(52); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(53); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(54); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(55); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(56); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(57); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(58); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(59); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(60); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(61); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(62); -} -impl CUdevice_attribute_enum { - pub const 
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(63); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(64); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(65); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(66); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(67); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(68); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(69); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(70); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(71); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: CUdevice_attribute_enum = - CUdevice_attribute_enum(72); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(73); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(74); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(75); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(76); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(77); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(78); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(79); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(80); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(81); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(82); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: CUdevice_attribute_enum = - CUdevice_attribute_enum(83); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: CUdevice_attribute_enum = - CUdevice_attribute_enum(84); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: CUdevice_attribute_enum = - CUdevice_attribute_enum(85); -} -impl CUdevice_attribute_enum { - pub 
const CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(86); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: CUdevice_attribute_enum = - CUdevice_attribute_enum(87); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: CUdevice_attribute_enum = - CUdevice_attribute_enum(88); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: CUdevice_attribute_enum = - CUdevice_attribute_enum(89); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(90); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: CUdevice_attribute_enum = - CUdevice_attribute_enum(91); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS: CUdevice_attribute_enum = - CUdevice_attribute_enum(92); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS: CUdevice_attribute_enum = - CUdevice_attribute_enum(93); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(94); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH: CUdevice_attribute_enum = - CUdevice_attribute_enum(95); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH: CUdevice_attribute_enum = - CUdevice_attribute_enum(96); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: CUdevice_attribute_enum = - CUdevice_attribute_enum(97); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES: CUdevice_attribute_enum = - CUdevice_attribute_enum(98); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(99); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: - CUdevice_attribute_enum = CUdevice_attribute_enum(100); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: CUdevice_attribute_enum = - CUdevice_attribute_enum(101); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(102); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(102); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: - CUdevice_attribute_enum = CUdevice_attribute_enum(103); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(104); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(105); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(106); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: CUdevice_attribute_enum = - 
CUdevice_attribute_enum(107); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: CUdevice_attribute_enum = - CUdevice_attribute_enum(108); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: CUdevice_attribute_enum = - CUdevice_attribute_enum(109); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(110); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(111); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(112); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(113); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(114); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(115); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(116); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: CUdevice_attribute_enum = - CUdevice_attribute_enum(117); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: CUdevice_attribute_enum = - CUdevice_attribute_enum(118); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: CUdevice_attribute_enum = - CUdevice_attribute_enum(119); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX: CUdevice_attribute_enum = CUdevice_attribute_enum(120); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUdevice_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUdevice_attribute_enum as CUdevice_attribute; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUdevprop_st { - pub maxThreadsPerBlock: ::std::os::raw::c_int, - pub maxThreadsDim: [::std::os::raw::c_int; 3usize], - pub maxGridSize: [::std::os::raw::c_int; 3usize], - pub sharedMemPerBlock: ::std::os::raw::c_int, - pub totalConstantMemory: ::std::os::raw::c_int, - pub SIMDWidth: ::std::os::raw::c_int, - pub memPitch: ::std::os::raw::c_int, - pub regsPerBlock: ::std::os::raw::c_int, - pub clockRate: ::std::os::raw::c_int, - pub textureAlign: ::std::os::raw::c_int, -} -pub type CUdevprop_v1 = CUdevprop_st; -pub type CUdevprop = CUdevprop_v1; -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_CONTEXT: CUpointer_attribute_enum = CUpointer_attribute_enum(1); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_MEMORY_TYPE: CUpointer_attribute_enum = - CUpointer_attribute_enum(2); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_DEVICE_POINTER: CUpointer_attribute_enum = - CUpointer_attribute_enum(3); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_HOST_POINTER: CUpointer_attribute_enum = - CUpointer_attribute_enum(4); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_P2P_TOKENS: CUpointer_attribute_enum = - 
CUpointer_attribute_enum(5); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: CUpointer_attribute_enum = - CUpointer_attribute_enum(6); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_BUFFER_ID: CUpointer_attribute_enum = - CUpointer_attribute_enum(7); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_IS_MANAGED: CUpointer_attribute_enum = - CUpointer_attribute_enum(8); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: CUpointer_attribute_enum = - CUpointer_attribute_enum(9); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: CUpointer_attribute_enum = - CUpointer_attribute_enum(10); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: CUpointer_attribute_enum = - CUpointer_attribute_enum(11); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_RANGE_SIZE: CUpointer_attribute_enum = - CUpointer_attribute_enum(12); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_MAPPED: CUpointer_attribute_enum = CUpointer_attribute_enum(13); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: CUpointer_attribute_enum = - CUpointer_attribute_enum(14); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE: CUpointer_attribute_enum = - CUpointer_attribute_enum(15); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAGS: CUpointer_attribute_enum = - CUpointer_attribute_enum(16); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: CUpointer_attribute_enum = - CUpointer_attribute_enum(17); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUpointer_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUpointer_attribute_enum as CUpointer_attribute; -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUfunction_attribute_enum = - CUfunction_attribute_enum(0); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(1); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(2); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(3); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_NUM_REGS: CUfunction_attribute_enum = CUfunction_attribute_enum(4); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_PTX_VERSION: CUfunction_attribute_enum = - CUfunction_attribute_enum(5); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_BINARY_VERSION: CUfunction_attribute_enum = - CUfunction_attribute_enum(6); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: CUfunction_attribute_enum = - CUfunction_attribute_enum(7); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(8); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: CUfunction_attribute_enum = - CUfunction_attribute_enum(9); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_MAX: CUfunction_attribute_enum = 
CUfunction_attribute_enum(10); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUfunction_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUfunction_attribute_enum as CUfunction_attribute; -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_NONE: CUfunc_cache_enum = CUfunc_cache_enum(0); -} -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_SHARED: CUfunc_cache_enum = CUfunc_cache_enum(1); -} -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_L1: CUfunc_cache_enum = CUfunc_cache_enum(2); -} -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_EQUAL: CUfunc_cache_enum = CUfunc_cache_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUfunc_cache_enum(pub ::std::os::raw::c_uint); -pub use self::CUfunc_cache_enum as CUfunc_cache; -impl CUsharedconfig_enum { - pub const CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(0); -} -impl CUsharedconfig_enum { - pub const CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: CUsharedconfig_enum = - CUsharedconfig_enum(1); -} -impl CUsharedconfig_enum { - pub const CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: CUsharedconfig_enum = - CUsharedconfig_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUsharedconfig_enum(pub ::std::os::raw::c_uint); -pub use self::CUsharedconfig_enum as CUsharedconfig; -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_HOST: CUmemorytype_enum = CUmemorytype_enum(1); -} -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_DEVICE: CUmemorytype_enum = CUmemorytype_enum(2); -} -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_ARRAY: CUmemorytype_enum = CUmemorytype_enum(3); -} -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_UNIFIED: CUmemorytype_enum = CUmemorytype_enum(4); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemorytype_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemorytype_enum as CUmemorytype; -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_SET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(1); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_UNSET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(2); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_SET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(3); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(4); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_SET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(5); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_UNSET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(6); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmem_advise_enum(pub ::std::os::raw::c_uint); -pub use self::CUmem_advise_enum as CUmem_advise; -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(1); -} -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(2); -} -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(3); -} -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(4); -} 
-#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmem_range_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUmem_range_attribute_enum as CUmem_range_attribute; -impl CUjit_option_enum { - pub const CU_JIT_MAX_REGISTERS: CUjit_option_enum = CUjit_option_enum(0); -} -impl CUjit_option_enum { - pub const CU_JIT_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(1); -} -impl CUjit_option_enum { - pub const CU_JIT_WALL_TIME: CUjit_option_enum = CUjit_option_enum(2); -} -impl CUjit_option_enum { - pub const CU_JIT_INFO_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(3); -} -impl CUjit_option_enum { - pub const CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(4); -} -impl CUjit_option_enum { - pub const CU_JIT_ERROR_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(5); -} -impl CUjit_option_enum { - pub const CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(6); -} -impl CUjit_option_enum { - pub const CU_JIT_OPTIMIZATION_LEVEL: CUjit_option_enum = CUjit_option_enum(7); -} -impl CUjit_option_enum { - pub const CU_JIT_TARGET_FROM_CUCONTEXT: CUjit_option_enum = CUjit_option_enum(8); -} -impl CUjit_option_enum { - pub const CU_JIT_TARGET: CUjit_option_enum = CUjit_option_enum(9); -} -impl CUjit_option_enum { - pub const CU_JIT_FALLBACK_STRATEGY: CUjit_option_enum = CUjit_option_enum(10); -} -impl CUjit_option_enum { - pub const CU_JIT_GENERATE_DEBUG_INFO: CUjit_option_enum = CUjit_option_enum(11); -} -impl CUjit_option_enum { - pub const CU_JIT_LOG_VERBOSE: CUjit_option_enum = CUjit_option_enum(12); -} -impl CUjit_option_enum { - pub const CU_JIT_GENERATE_LINE_INFO: CUjit_option_enum = CUjit_option_enum(13); -} -impl CUjit_option_enum { - pub const CU_JIT_CACHE_MODE: CUjit_option_enum = CUjit_option_enum(14); -} -impl CUjit_option_enum { - pub const CU_JIT_NEW_SM3X_OPT: CUjit_option_enum = CUjit_option_enum(15); -} -impl CUjit_option_enum { - pub const CU_JIT_FAST_COMPILE: CUjit_option_enum = CUjit_option_enum(16); -} -impl CUjit_option_enum { - pub const CU_JIT_GLOBAL_SYMBOL_NAMES: CUjit_option_enum = CUjit_option_enum(17); -} -impl CUjit_option_enum { - pub const CU_JIT_GLOBAL_SYMBOL_ADDRESSES: CUjit_option_enum = CUjit_option_enum(18); -} -impl CUjit_option_enum { - pub const CU_JIT_GLOBAL_SYMBOL_COUNT: CUjit_option_enum = CUjit_option_enum(19); -} -impl CUjit_option_enum { - pub const CU_JIT_LTO: CUjit_option_enum = CUjit_option_enum(20); -} -impl CUjit_option_enum { - pub const CU_JIT_FTZ: CUjit_option_enum = CUjit_option_enum(21); -} -impl CUjit_option_enum { - pub const CU_JIT_PREC_DIV: CUjit_option_enum = CUjit_option_enum(22); -} -impl CUjit_option_enum { - pub const CU_JIT_PREC_SQRT: CUjit_option_enum = CUjit_option_enum(23); -} -impl CUjit_option_enum { - pub const CU_JIT_FMA: CUjit_option_enum = CUjit_option_enum(24); -} -impl CUjit_option_enum { - pub const CU_JIT_NUM_OPTIONS: CUjit_option_enum = CUjit_option_enum(25); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUjit_option_enum(pub ::std::os::raw::c_uint); -pub use self::CUjit_option_enum as CUjit_option; -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_CUBIN: CUjitInputType_enum = CUjitInputType_enum(0); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_PTX: CUjitInputType_enum = CUjitInputType_enum(1); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_FATBINARY: CUjitInputType_enum = CUjitInputType_enum(2); -} -impl CUjitInputType_enum { - pub const 
CU_JIT_INPUT_OBJECT: CUjitInputType_enum = CUjitInputType_enum(3); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_LIBRARY: CUjitInputType_enum = CUjitInputType_enum(4); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_NVVM: CUjitInputType_enum = CUjitInputType_enum(5); -} -impl CUjitInputType_enum { - pub const CU_JIT_NUM_INPUT_TYPES: CUjitInputType_enum = CUjitInputType_enum(6); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUjitInputType_enum(pub ::std::os::raw::c_uint); -pub use self::CUjitInputType_enum as CUjitInputType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUlinkState_st { - _unused: [u8; 0], -} -pub type CUlinkState = *mut CUlinkState_st; -impl CUlimit_enum { - pub const CU_LIMIT_STACK_SIZE: CUlimit_enum = CUlimit_enum(0); -} -impl CUlimit_enum { - pub const CU_LIMIT_PRINTF_FIFO_SIZE: CUlimit_enum = CUlimit_enum(1); -} -impl CUlimit_enum { - pub const CU_LIMIT_MALLOC_HEAP_SIZE: CUlimit_enum = CUlimit_enum(2); -} -impl CUlimit_enum { - pub const CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: CUlimit_enum = CUlimit_enum(3); -} -impl CUlimit_enum { - pub const CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: CUlimit_enum = CUlimit_enum(4); -} -impl CUlimit_enum { - pub const CU_LIMIT_MAX_L2_FETCH_GRANULARITY: CUlimit_enum = CUlimit_enum(5); -} -impl CUlimit_enum { - pub const CU_LIMIT_PERSISTING_L2_CACHE_SIZE: CUlimit_enum = CUlimit_enum(6); -} -impl CUlimit_enum { - pub const CU_LIMIT_MAX: CUlimit_enum = CUlimit_enum(7); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUlimit_enum(pub ::std::os::raw::c_uint); -pub use self::CUlimit_enum as CUlimit; -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_ARRAY: CUresourcetype_enum = CUresourcetype_enum(0); -} -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: CUresourcetype_enum = CUresourcetype_enum(1); -} -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_LINEAR: CUresourcetype_enum = CUresourcetype_enum(2); -} -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_PITCH2D: CUresourcetype_enum = CUresourcetype_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUresourcetype_enum(pub ::std::os::raw::c_uint); -pub use self::CUresourcetype_enum as CUresourcetype; -pub type CUhostFn = - ::std::option::Option; -impl CUaccessProperty_enum { - pub const CU_ACCESS_PROPERTY_NORMAL: CUaccessProperty_enum = CUaccessProperty_enum(0); -} -impl CUaccessProperty_enum { - pub const CU_ACCESS_PROPERTY_STREAMING: CUaccessProperty_enum = CUaccessProperty_enum(1); -} -impl CUaccessProperty_enum { - pub const CU_ACCESS_PROPERTY_PERSISTING: CUaccessProperty_enum = CUaccessProperty_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUaccessProperty_enum(pub ::std::os::raw::c_uint); -pub use self::CUaccessProperty_enum as CUaccessProperty; -#[repr(C)] -#[derive(Copy, Clone, PartialEq)] -pub struct CUaccessPolicyWindow_st { - pub base_ptr: *mut ::std::os::raw::c_void, - pub num_bytes: usize, - pub hitRatio: f32, - pub hitProp: CUaccessProperty, - pub missProp: CUaccessProperty, -} -pub type CUaccessPolicyWindow_v1 = CUaccessPolicyWindow_st; -pub type CUaccessPolicyWindow = CUaccessPolicyWindow_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_KERNEL_NODE_PARAMS_st { - pub func: CUfunction, - pub gridDimX: ::std::os::raw::c_uint, - pub gridDimY: ::std::os::raw::c_uint, - pub gridDimZ: ::std::os::raw::c_uint, - pub 
blockDimX: ::std::os::raw::c_uint, - pub blockDimY: ::std::os::raw::c_uint, - pub blockDimZ: ::std::os::raw::c_uint, - pub sharedMemBytes: ::std::os::raw::c_uint, - pub kernelParams: *mut *mut ::std::os::raw::c_void, - pub extra: *mut *mut ::std::os::raw::c_void, -} -pub type CUDA_KERNEL_NODE_PARAMS_v1 = CUDA_KERNEL_NODE_PARAMS_st; -pub type CUDA_KERNEL_NODE_PARAMS = CUDA_KERNEL_NODE_PARAMS_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_MEMSET_NODE_PARAMS_st { - pub dst: CUdeviceptr, - pub pitch: usize, - pub value: ::std::os::raw::c_uint, - pub elementSize: ::std::os::raw::c_uint, - pub width: usize, - pub height: usize, -} -pub type CUDA_MEMSET_NODE_PARAMS_v1 = CUDA_MEMSET_NODE_PARAMS_st; -pub type CUDA_MEMSET_NODE_PARAMS = CUDA_MEMSET_NODE_PARAMS_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_HOST_NODE_PARAMS_st { - pub fn_: CUhostFn, - pub userData: *mut ::std::os::raw::c_void, -} -pub type CUDA_HOST_NODE_PARAMS_v1 = CUDA_HOST_NODE_PARAMS_st; -pub type CUDA_HOST_NODE_PARAMS = CUDA_HOST_NODE_PARAMS_v1; -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_KERNEL: CUgraphNodeType_enum = CUgraphNodeType_enum(0); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_MEMCPY: CUgraphNodeType_enum = CUgraphNodeType_enum(1); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_MEMSET: CUgraphNodeType_enum = CUgraphNodeType_enum(2); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_HOST: CUgraphNodeType_enum = CUgraphNodeType_enum(3); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_GRAPH: CUgraphNodeType_enum = CUgraphNodeType_enum(4); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_EMPTY: CUgraphNodeType_enum = CUgraphNodeType_enum(5); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_WAIT_EVENT: CUgraphNodeType_enum = CUgraphNodeType_enum(6); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_EVENT_RECORD: CUgraphNodeType_enum = CUgraphNodeType_enum(7); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL: CUgraphNodeType_enum = CUgraphNodeType_enum(8); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT: CUgraphNodeType_enum = CUgraphNodeType_enum(9); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_MEM_ALLOC: CUgraphNodeType_enum = CUgraphNodeType_enum(10); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_MEM_FREE: CUgraphNodeType_enum = CUgraphNodeType_enum(11); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUgraphNodeType_enum(pub ::std::os::raw::c_uint); -pub use self::CUgraphNodeType_enum as CUgraphNodeType; -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_AUTO: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(1); -} -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_SPIN: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(2); -} -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_YIELD: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(3); -} -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_BLOCKING_SYNC: CUsynchronizationPolicy_enum = - CUsynchronizationPolicy_enum(4); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUsynchronizationPolicy_enum(pub ::std::os::raw::c_uint); -pub use self::CUsynchronizationPolicy_enum as CUsynchronizationPolicy; -impl CUkernelNodeAttrID_enum { - pub const 
CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUkernelNodeAttrID_enum = - CUkernelNodeAttrID_enum(1); -} -impl CUkernelNodeAttrID_enum { - pub const CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE: CUkernelNodeAttrID_enum = - CUkernelNodeAttrID_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUkernelNodeAttrID_enum(pub ::std::os::raw::c_uint); -pub use self::CUkernelNodeAttrID_enum as CUkernelNodeAttrID; -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUkernelNodeAttrValue_union { - pub accessPolicyWindow: CUaccessPolicyWindow, - pub cooperative: ::std::os::raw::c_int, -} -pub type CUkernelNodeAttrValue_v1 = CUkernelNodeAttrValue_union; -pub type CUkernelNodeAttrValue = CUkernelNodeAttrValue_v1; -impl CUstreamCaptureStatus_enum { - pub const CU_STREAM_CAPTURE_STATUS_NONE: CUstreamCaptureStatus_enum = - CUstreamCaptureStatus_enum(0); -} -impl CUstreamCaptureStatus_enum { - pub const CU_STREAM_CAPTURE_STATUS_ACTIVE: CUstreamCaptureStatus_enum = - CUstreamCaptureStatus_enum(1); -} -impl CUstreamCaptureStatus_enum { - pub const CU_STREAM_CAPTURE_STATUS_INVALIDATED: CUstreamCaptureStatus_enum = - CUstreamCaptureStatus_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUstreamCaptureStatus_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamCaptureStatus_enum as CUstreamCaptureStatus; -impl CUstreamCaptureMode_enum { - pub const CU_STREAM_CAPTURE_MODE_GLOBAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(0); -} -impl CUstreamCaptureMode_enum { - pub const CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: CUstreamCaptureMode_enum = - CUstreamCaptureMode_enum(1); -} -impl CUstreamCaptureMode_enum { - pub const CU_STREAM_CAPTURE_MODE_RELAXED: CUstreamCaptureMode_enum = - CUstreamCaptureMode_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUstreamCaptureMode_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamCaptureMode_enum as CUstreamCaptureMode; -impl CUstreamAttrID_enum { - pub const CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUstreamAttrID_enum = - CUstreamAttrID_enum(1); -} -impl CUstreamAttrID_enum { - pub const CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY: CUstreamAttrID_enum = - CUstreamAttrID_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUstreamAttrID_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamAttrID_enum as CUstreamAttrID; -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamAttrValue_union { - pub accessPolicyWindow: CUaccessPolicyWindow, - pub syncPolicy: CUsynchronizationPolicy, -} -pub type CUstreamAttrValue_v1 = CUstreamAttrValue_union; -pub type CUstreamAttrValue = CUstreamAttrValue_v1; -impl CUexecAffinityType_enum { - pub const CU_EXEC_AFFINITY_TYPE_SM_COUNT: CUexecAffinityType_enum = CUexecAffinityType_enum(0); -} -impl CUexecAffinityType_enum { - pub const CU_EXEC_AFFINITY_TYPE_MAX: CUexecAffinityType_enum = CUexecAffinityType_enum(1); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUexecAffinityType_enum(pub ::std::os::raw::c_uint); -pub use self::CUexecAffinityType_enum as CUexecAffinityType; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUexecAffinitySmCount_st { - pub val: ::std::os::raw::c_uint, -} -pub type CUexecAffinitySmCount_v1 = CUexecAffinitySmCount_st; -pub type CUexecAffinitySmCount = CUexecAffinitySmCount_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUexecAffinityParam_st { - pub type_: 
CUexecAffinityType, - pub param: CUexecAffinityParam_st__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUexecAffinityParam_st__bindgen_ty_1 { - pub smCount: CUexecAffinitySmCount, -} -pub type CUexecAffinityParam_v1 = CUexecAffinityParam_st; -pub type CUexecAffinityParam = CUexecAffinityParam_v1; -impl cudaError_enum { - pub const CUDA_SUCCESS: cudaError_enum = cudaError_enum(0); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_VALUE: cudaError_enum = cudaError_enum(1); -} -impl cudaError_enum { - pub const CUDA_ERROR_OUT_OF_MEMORY: cudaError_enum = cudaError_enum(2); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_INITIALIZED: cudaError_enum = cudaError_enum(3); -} -impl cudaError_enum { - pub const CUDA_ERROR_DEINITIALIZED: cudaError_enum = cudaError_enum(4); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_DISABLED: cudaError_enum = cudaError_enum(5); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_NOT_INITIALIZED: cudaError_enum = cudaError_enum(6); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_ALREADY_STARTED: cudaError_enum = cudaError_enum(7); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_ALREADY_STOPPED: cudaError_enum = cudaError_enum(8); -} -impl cudaError_enum { - pub const CUDA_ERROR_STUB_LIBRARY: cudaError_enum = cudaError_enum(34); -} -impl cudaError_enum { - pub const CUDA_ERROR_NO_DEVICE: cudaError_enum = cudaError_enum(100); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_DEVICE: cudaError_enum = cudaError_enum(101); -} -impl cudaError_enum { - pub const CUDA_ERROR_DEVICE_NOT_LICENSED: cudaError_enum = cudaError_enum(102); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_IMAGE: cudaError_enum = cudaError_enum(200); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_CONTEXT: cudaError_enum = cudaError_enum(201); -} -impl cudaError_enum { - pub const CUDA_ERROR_CONTEXT_ALREADY_CURRENT: cudaError_enum = cudaError_enum(202); -} -impl cudaError_enum { - pub const CUDA_ERROR_MAP_FAILED: cudaError_enum = cudaError_enum(205); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNMAP_FAILED: cudaError_enum = cudaError_enum(206); -} -impl cudaError_enum { - pub const CUDA_ERROR_ARRAY_IS_MAPPED: cudaError_enum = cudaError_enum(207); -} -impl cudaError_enum { - pub const CUDA_ERROR_ALREADY_MAPPED: cudaError_enum = cudaError_enum(208); -} -impl cudaError_enum { - pub const CUDA_ERROR_NO_BINARY_FOR_GPU: cudaError_enum = cudaError_enum(209); -} -impl cudaError_enum { - pub const CUDA_ERROR_ALREADY_ACQUIRED: cudaError_enum = cudaError_enum(210); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_MAPPED: cudaError_enum = cudaError_enum(211); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_MAPPED_AS_ARRAY: cudaError_enum = cudaError_enum(212); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_MAPPED_AS_POINTER: cudaError_enum = cudaError_enum(213); -} -impl cudaError_enum { - pub const CUDA_ERROR_ECC_UNCORRECTABLE: cudaError_enum = cudaError_enum(214); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNSUPPORTED_LIMIT: cudaError_enum = cudaError_enum(215); -} -impl cudaError_enum { - pub const CUDA_ERROR_CONTEXT_ALREADY_IN_USE: cudaError_enum = cudaError_enum(216); -} -impl cudaError_enum { - pub const CUDA_ERROR_PEER_ACCESS_UNSUPPORTED: cudaError_enum = cudaError_enum(217); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_PTX: cudaError_enum = cudaError_enum(218); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_GRAPHICS_CONTEXT: cudaError_enum = cudaError_enum(219); -} 
-impl cudaError_enum { - pub const CUDA_ERROR_NVLINK_UNCORRECTABLE: cudaError_enum = cudaError_enum(220); -} -impl cudaError_enum { - pub const CUDA_ERROR_JIT_COMPILER_NOT_FOUND: cudaError_enum = cudaError_enum(221); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNSUPPORTED_PTX_VERSION: cudaError_enum = cudaError_enum(222); -} -impl cudaError_enum { - pub const CUDA_ERROR_JIT_COMPILATION_DISABLED: cudaError_enum = cudaError_enum(223); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY: cudaError_enum = cudaError_enum(224); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_SOURCE: cudaError_enum = cudaError_enum(300); -} -impl cudaError_enum { - pub const CUDA_ERROR_FILE_NOT_FOUND: cudaError_enum = cudaError_enum(301); -} -impl cudaError_enum { - pub const CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: cudaError_enum = cudaError_enum(302); -} -impl cudaError_enum { - pub const CUDA_ERROR_SHARED_OBJECT_INIT_FAILED: cudaError_enum = cudaError_enum(303); -} -impl cudaError_enum { - pub const CUDA_ERROR_OPERATING_SYSTEM: cudaError_enum = cudaError_enum(304); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_HANDLE: cudaError_enum = cudaError_enum(400); -} -impl cudaError_enum { - pub const CUDA_ERROR_ILLEGAL_STATE: cudaError_enum = cudaError_enum(401); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_FOUND: cudaError_enum = cudaError_enum(500); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_READY: cudaError_enum = cudaError_enum(600); -} -impl cudaError_enum { - pub const CUDA_ERROR_ILLEGAL_ADDRESS: cudaError_enum = cudaError_enum(700); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: cudaError_enum = cudaError_enum(701); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_TIMEOUT: cudaError_enum = cudaError_enum(702); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: cudaError_enum = cudaError_enum(703); -} -impl cudaError_enum { - pub const CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED: cudaError_enum = cudaError_enum(704); -} -impl cudaError_enum { - pub const CUDA_ERROR_PEER_ACCESS_NOT_ENABLED: cudaError_enum = cudaError_enum(705); -} -impl cudaError_enum { - pub const CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE: cudaError_enum = cudaError_enum(708); -} -impl cudaError_enum { - pub const CUDA_ERROR_CONTEXT_IS_DESTROYED: cudaError_enum = cudaError_enum(709); -} -impl cudaError_enum { - pub const CUDA_ERROR_ASSERT: cudaError_enum = cudaError_enum(710); -} -impl cudaError_enum { - pub const CUDA_ERROR_TOO_MANY_PEERS: cudaError_enum = cudaError_enum(711); -} -impl cudaError_enum { - pub const CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED: cudaError_enum = cudaError_enum(712); -} -impl cudaError_enum { - pub const CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED: cudaError_enum = cudaError_enum(713); -} -impl cudaError_enum { - pub const CUDA_ERROR_HARDWARE_STACK_ERROR: cudaError_enum = cudaError_enum(714); -} -impl cudaError_enum { - pub const CUDA_ERROR_ILLEGAL_INSTRUCTION: cudaError_enum = cudaError_enum(715); -} -impl cudaError_enum { - pub const CUDA_ERROR_MISALIGNED_ADDRESS: cudaError_enum = cudaError_enum(716); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_ADDRESS_SPACE: cudaError_enum = cudaError_enum(717); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_PC: cudaError_enum = cudaError_enum(718); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_FAILED: cudaError_enum = cudaError_enum(719); -} -impl cudaError_enum { - pub const CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE: cudaError_enum = 
cudaError_enum(720); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_PERMITTED: cudaError_enum = cudaError_enum(800); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_SUPPORTED: cudaError_enum = cudaError_enum(801); -} -impl cudaError_enum { - pub const CUDA_ERROR_SYSTEM_NOT_READY: cudaError_enum = cudaError_enum(802); -} -impl cudaError_enum { - pub const CUDA_ERROR_SYSTEM_DRIVER_MISMATCH: cudaError_enum = cudaError_enum(803); -} -impl cudaError_enum { - pub const CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: cudaError_enum = cudaError_enum(804); -} -impl cudaError_enum { - pub const CUDA_ERROR_MPS_CONNECTION_FAILED: cudaError_enum = cudaError_enum(805); -} -impl cudaError_enum { - pub const CUDA_ERROR_MPS_RPC_FAILURE: cudaError_enum = cudaError_enum(806); -} -impl cudaError_enum { - pub const CUDA_ERROR_MPS_SERVER_NOT_READY: cudaError_enum = cudaError_enum(807); -} -impl cudaError_enum { - pub const CUDA_ERROR_MPS_MAX_CLIENTS_REACHED: cudaError_enum = cudaError_enum(808); -} -impl cudaError_enum { - pub const CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED: cudaError_enum = cudaError_enum(809); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED: cudaError_enum = cudaError_enum(900); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_INVALIDATED: cudaError_enum = cudaError_enum(901); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_MERGE: cudaError_enum = cudaError_enum(902); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_UNMATCHED: cudaError_enum = cudaError_enum(903); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_UNJOINED: cudaError_enum = cudaError_enum(904); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_ISOLATION: cudaError_enum = cudaError_enum(905); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_IMPLICIT: cudaError_enum = cudaError_enum(906); -} -impl cudaError_enum { - pub const CUDA_ERROR_CAPTURED_EVENT: cudaError_enum = cudaError_enum(907); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD: cudaError_enum = cudaError_enum(908); -} -impl cudaError_enum { - pub const CUDA_ERROR_TIMEOUT: cudaError_enum = cudaError_enum(909); -} -impl cudaError_enum { - pub const CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE: cudaError_enum = cudaError_enum(910); -} -impl cudaError_enum { - pub const CUDA_ERROR_EXTERNAL_DEVICE: cudaError_enum = cudaError_enum(911); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNKNOWN: cudaError_enum = cudaError_enum(999); -} -#[repr(transparent)] -#[must_use] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct cudaError_enum(pub ::std::os::raw::c_uint); -pub use self::cudaError_enum as CUresult; -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(1); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(2); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(3); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(4); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(4); -} -#[repr(transparent)] -#[derive(Debug, 
Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUdevice_P2PAttribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUdevice_P2PAttribute_enum as CUdevice_P2PAttribute; -pub type CUstreamCallback = ::std::option::Option< - unsafe extern "C" fn( - hStream: CUstream, - status: CUresult, - userData: *mut ::std::os::raw::c_void, - ), ->; -pub type CUoccupancyB2DSize = - ::std::option::Option usize>; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_MEMCPY2D_st { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr, - pub srcArray: CUarray, - pub srcPitch: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr, - pub dstArray: CUarray, - pub dstPitch: usize, - pub WidthInBytes: usize, - pub Height: usize, -} -pub type CUDA_MEMCPY2D_v2 = CUDA_MEMCPY2D_st; -pub type CUDA_MEMCPY2D = CUDA_MEMCPY2D_v2; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_MEMCPY3D_st { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcZ: usize, - pub srcLOD: usize, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr, - pub srcArray: CUarray, - pub reserved0: *mut ::std::os::raw::c_void, - pub srcPitch: usize, - pub srcHeight: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstZ: usize, - pub dstLOD: usize, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr, - pub dstArray: CUarray, - pub reserved1: *mut ::std::os::raw::c_void, - pub dstPitch: usize, - pub dstHeight: usize, - pub WidthInBytes: usize, - pub Height: usize, - pub Depth: usize, -} -pub type CUDA_MEMCPY3D_v2 = CUDA_MEMCPY3D_st; -pub type CUDA_MEMCPY3D = CUDA_MEMCPY3D_v2; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_MEMCPY3D_PEER_st { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcZ: usize, - pub srcLOD: usize, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr, - pub srcArray: CUarray, - pub srcContext: CUcontext, - pub srcPitch: usize, - pub srcHeight: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstZ: usize, - pub dstLOD: usize, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr, - pub dstArray: CUarray, - pub dstContext: CUcontext, - pub dstPitch: usize, - pub dstHeight: usize, - pub WidthInBytes: usize, - pub Height: usize, - pub Depth: usize, -} -pub type CUDA_MEMCPY3D_PEER_v1 = CUDA_MEMCPY3D_PEER_st; -pub type CUDA_MEMCPY3D_PEER = CUDA_MEMCPY3D_PEER_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_ARRAY_DESCRIPTOR_st { - pub Width: usize, - pub Height: usize, - pub Format: CUarray_format, - pub NumChannels: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY_DESCRIPTOR_v2 = CUDA_ARRAY_DESCRIPTOR_st; -pub type CUDA_ARRAY_DESCRIPTOR = CUDA_ARRAY_DESCRIPTOR_v2; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_ARRAY3D_DESCRIPTOR_st { - pub Width: usize, - pub Height: usize, - pub Depth: usize, - pub Format: CUarray_format, - pub NumChannels: ::std::os::raw::c_uint, - pub Flags: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY3D_DESCRIPTOR_v2 = CUDA_ARRAY3D_DESCRIPTOR_st; -pub type CUDA_ARRAY3D_DESCRIPTOR = CUDA_ARRAY3D_DESCRIPTOR_v2; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct 
CUDA_ARRAY_SPARSE_PROPERTIES_st { - pub tileExtent: CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1, - pub miptailFirstLevel: ::std::os::raw::c_uint, - pub miptailSize: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 4usize], -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 { - pub width: ::std::os::raw::c_uint, - pub height: ::std::os::raw::c_uint, - pub depth: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY_SPARSE_PROPERTIES_v1 = CUDA_ARRAY_SPARSE_PROPERTIES_st; -pub type CUDA_ARRAY_SPARSE_PROPERTIES = CUDA_ARRAY_SPARSE_PROPERTIES_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st { - pub resType: CUresourcetype, - pub res: CUDA_RESOURCE_DESC_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_RESOURCE_DESC_st__bindgen_ty_1 { - pub array: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub mipmap: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2, - pub linear: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3, - pub pitch2D: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4, - pub reserved: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - pub hArray: CUarray, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { - pub hMipmappedArray: CUmipmappedArray, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { - pub devPtr: CUdeviceptr, - pub format: CUarray_format, - pub numChannels: ::std::os::raw::c_uint, - pub sizeInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { - pub devPtr: CUdeviceptr, - pub format: CUarray_format, - pub numChannels: ::std::os::raw::c_uint, - pub width: usize, - pub height: usize, - pub pitchInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 { - pub reserved: [::std::os::raw::c_int; 32usize], -} -pub type CUDA_RESOURCE_DESC_v1 = CUDA_RESOURCE_DESC_st; -pub type CUDA_RESOURCE_DESC = CUDA_RESOURCE_DESC_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq)] -pub struct CUDA_TEXTURE_DESC_st { - pub addressMode: [CUaddress_mode; 3usize], - pub filterMode: CUfilter_mode, - pub flags: ::std::os::raw::c_uint, - pub maxAnisotropy: ::std::os::raw::c_uint, - pub mipmapFilterMode: CUfilter_mode, - pub mipmapLevelBias: f32, - pub minMipmapLevelClamp: f32, - pub maxMipmapLevelClamp: f32, - pub borderColor: [f32; 4usize], - pub reserved: [::std::os::raw::c_int; 12usize], -} -pub type CUDA_TEXTURE_DESC_v1 = CUDA_TEXTURE_DESC_st; -pub type CUDA_TEXTURE_DESC = CUDA_TEXTURE_DESC_v1; -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_NONE: CUresourceViewFormat_enum = CUresourceViewFormat_enum(0); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(1); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(2); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(3); -} -impl CUresourceViewFormat_enum { - pub const 
CU_RES_VIEW_FORMAT_SINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(4); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(5); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(6); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_1X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(7); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_2X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(8); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_4X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(9); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_1X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(10); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_2X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(11); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_4X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(12); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_1X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(13); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_2X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(14); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_4X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(15); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_1X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(16); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_2X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(17); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_4X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(18); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_1X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(19); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_2X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(20); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_4X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(21); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_1X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(22); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_2X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(23); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_4X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(24); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC1: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(25); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC2: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(26); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC3: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(27); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC4: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(28); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SIGNED_BC4: 
CUresourceViewFormat_enum = - CUresourceViewFormat_enum(29); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC5: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(30); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SIGNED_BC5: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(31); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC6H: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(32); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SIGNED_BC6H: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(33); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC7: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(34); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUresourceViewFormat_enum(pub ::std::os::raw::c_uint); -pub use self::CUresourceViewFormat_enum as CUresourceViewFormat; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_RESOURCE_VIEW_DESC_st { - pub format: CUresourceViewFormat, - pub width: usize, - pub height: usize, - pub depth: usize, - pub firstMipmapLevel: ::std::os::raw::c_uint, - pub lastMipmapLevel: ::std::os::raw::c_uint, - pub firstLayer: ::std::os::raw::c_uint, - pub lastLayer: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type CUDA_RESOURCE_VIEW_DESC_v1 = CUDA_RESOURCE_VIEW_DESC_st; -pub type CUDA_RESOURCE_VIEW_DESC = CUDA_RESOURCE_VIEW_DESC_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_LAUNCH_PARAMS_st { - pub function: CUfunction, - pub gridDimX: ::std::os::raw::c_uint, - pub gridDimY: ::std::os::raw::c_uint, - pub gridDimZ: ::std::os::raw::c_uint, - pub blockDimX: ::std::os::raw::c_uint, - pub blockDimY: ::std::os::raw::c_uint, - pub blockDimZ: ::std::os::raw::c_uint, - pub sharedMemBytes: ::std::os::raw::c_uint, - pub hStream: CUstream, - pub kernelParams: *mut *mut ::std::os::raw::c_void, -} -pub type CUDA_LAUNCH_PARAMS_v1 = CUDA_LAUNCH_PARAMS_st; -pub type CUDA_LAUNCH_PARAMS = CUDA_LAUNCH_PARAMS_v1; -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(1); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(2); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(3); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(4); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(5); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(6); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(7); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(8); -} -#[repr(transparent)] -#[derive(Debug, 
Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUexternalMemoryHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUexternalMemoryHandleType_enum as CUexternalMemoryHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { - pub type_: CUexternalMemoryHandleType, - pub handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1, - pub size: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 { - pub fd: ::std::os::raw::c_int, - pub win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciBufObject: *const ::std::os::raw::c_void, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - pub handle: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_void, -} -pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st; -pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { - pub offset: ::std::os::raw::c_ulonglong, - pub size: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st; -pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { - pub offset: ::std::os::raw::c_ulonglong, - pub arrayDesc: CUDA_ARRAY3D_DESCRIPTOR, - pub numLevels: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 = - CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st; -pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1; -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(1); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(2); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(3); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(4); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(5); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(6); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(7); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT: - CUexternalSemaphoreHandleType_enum = 
CUexternalSemaphoreHandleType_enum(8); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(9); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(10); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUexternalSemaphoreHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUexternalSemaphoreHandleType_enum as CUexternalSemaphoreHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { - pub type_: CUexternalSemaphoreHandleType, - pub handle: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1 { - pub fd: ::std::os::raw::c_int, - pub win32: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSyncObj: *const ::std::os::raw::c_void, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - pub handle: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_void, -} -pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st; -pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { - pub params: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 { - pub fence: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2, - pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3, - pub reserved: [::std::os::raw::c_uint; 12usize], -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { - pub value: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2 { - pub fence: *mut ::std::os::raw::c_void, - pub reserved: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { - pub key: ::std::os::raw::c_ulonglong, -} -pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st; -pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { - pub params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 { - pub fence: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSync: 
CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2, - pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3, - pub reserved: [::std::os::raw::c_uint; 10usize], -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { - pub value: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2 { - pub fence: *mut ::std::os::raw::c_void, - pub reserved: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { - pub key: ::std::os::raw::c_ulonglong, - pub timeoutMs: ::std::os::raw::c_uint, -} -pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st; -pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st { - pub extSemArray: *mut CUexternalSemaphore, - pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, - pub numExtSems: ::std::os::raw::c_uint, -} -pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st; -pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st { - pub extSemArray: *mut CUexternalSemaphore, - pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, - pub numExtSems: ::std::os::raw::c_uint, -} -pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_st; -pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1; -pub type CUmemGenericAllocationHandle_v1 = ::std::os::raw::c_ulonglong; -pub type CUmemGenericAllocationHandle = CUmemGenericAllocationHandle_v1; -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_NONE: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(0); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(1); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_WIN32: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(2); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_WIN32_KMT: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(4); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_MAX: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(2147483647); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemAllocationHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAllocationHandleType_enum as CUmemAllocationHandleType; -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_NONE: CUmemAccess_flags_enum = CUmemAccess_flags_enum(0); -} -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_READ: CUmemAccess_flags_enum = CUmemAccess_flags_enum(1); -} -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_READWRITE: CUmemAccess_flags_enum = - CUmemAccess_flags_enum(3); -} -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_MAX: CUmemAccess_flags_enum = - CUmemAccess_flags_enum(2147483647); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, 
PartialEq, Eq)] -pub struct CUmemAccess_flags_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAccess_flags_enum as CUmemAccess_flags; -impl CUmemLocationType_enum { - pub const CU_MEM_LOCATION_TYPE_INVALID: CUmemLocationType_enum = CUmemLocationType_enum(0); -} -impl CUmemLocationType_enum { - pub const CU_MEM_LOCATION_TYPE_DEVICE: CUmemLocationType_enum = CUmemLocationType_enum(1); -} -impl CUmemLocationType_enum { - pub const CU_MEM_LOCATION_TYPE_MAX: CUmemLocationType_enum = CUmemLocationType_enum(2147483647); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemLocationType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemLocationType_enum as CUmemLocationType; -impl CUmemAllocationType_enum { - pub const CU_MEM_ALLOCATION_TYPE_INVALID: CUmemAllocationType_enum = - CUmemAllocationType_enum(0); -} -impl CUmemAllocationType_enum { - pub const CU_MEM_ALLOCATION_TYPE_PINNED: CUmemAllocationType_enum = CUmemAllocationType_enum(1); -} -impl CUmemAllocationType_enum { - pub const CU_MEM_ALLOCATION_TYPE_MAX: CUmemAllocationType_enum = - CUmemAllocationType_enum(2147483647); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemAllocationType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAllocationType_enum as CUmemAllocationType; -impl CUmemAllocationGranularity_flags_enum { - pub const CU_MEM_ALLOC_GRANULARITY_MINIMUM: CUmemAllocationGranularity_flags_enum = - CUmemAllocationGranularity_flags_enum(0); -} -impl CUmemAllocationGranularity_flags_enum { - pub const CU_MEM_ALLOC_GRANULARITY_RECOMMENDED: CUmemAllocationGranularity_flags_enum = - CUmemAllocationGranularity_flags_enum(1); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemAllocationGranularity_flags_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAllocationGranularity_flags_enum as CUmemAllocationGranularity_flags; -impl CUarraySparseSubresourceType_enum { - pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL: CUarraySparseSubresourceType_enum = - CUarraySparseSubresourceType_enum(0); -} -impl CUarraySparseSubresourceType_enum { - pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL: CUarraySparseSubresourceType_enum = - CUarraySparseSubresourceType_enum(1); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUarraySparseSubresourceType_enum(pub ::std::os::raw::c_uint); -pub use self::CUarraySparseSubresourceType_enum as CUarraySparseSubresourceType; -impl CUmemOperationType_enum { - pub const CU_MEM_OPERATION_TYPE_MAP: CUmemOperationType_enum = CUmemOperationType_enum(1); -} -impl CUmemOperationType_enum { - pub const CU_MEM_OPERATION_TYPE_UNMAP: CUmemOperationType_enum = CUmemOperationType_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemOperationType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemOperationType_enum as CUmemOperationType; -impl CUmemHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_GENERIC: CUmemHandleType_enum = CUmemHandleType_enum(0); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemHandleType_enum as CUmemHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUarrayMapInfo_st { - pub resourceType: CUresourcetype, - pub resource: CUarrayMapInfo_st__bindgen_ty_1, - pub subresourceType: CUarraySparseSubresourceType, - pub subresource: 
CUarrayMapInfo_st__bindgen_ty_2, - pub memOperationType: CUmemOperationType, - pub memHandleType: CUmemHandleType, - pub memHandle: CUarrayMapInfo_st__bindgen_ty_3, - pub offset: ::std::os::raw::c_ulonglong, - pub deviceBitMask: ::std::os::raw::c_uint, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 2usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUarrayMapInfo_st__bindgen_ty_1 { - pub mipmap: CUmipmappedArray, - pub array: CUarray, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUarrayMapInfo_st__bindgen_ty_2 { - pub sparseLevel: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1, - pub miptail: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 { - pub level: ::std::os::raw::c_uint, - pub layer: ::std::os::raw::c_uint, - pub offsetX: ::std::os::raw::c_uint, - pub offsetY: ::std::os::raw::c_uint, - pub offsetZ: ::std::os::raw::c_uint, - pub extentWidth: ::std::os::raw::c_uint, - pub extentHeight: ::std::os::raw::c_uint, - pub extentDepth: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 { - pub layer: ::std::os::raw::c_uint, - pub offset: ::std::os::raw::c_ulonglong, - pub size: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUarrayMapInfo_st__bindgen_ty_3 { - pub memHandle: CUmemGenericAllocationHandle, -} -pub type CUarrayMapInfo_v1 = CUarrayMapInfo_st; -pub type CUarrayMapInfo = CUarrayMapInfo_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUmemLocation_st { - pub type_: CUmemLocationType, - pub id: ::std::os::raw::c_int, -} -pub type CUmemLocation_v1 = CUmemLocation_st; -pub type CUmemLocation = CUmemLocation_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUmemAllocationProp_st { - pub type_: CUmemAllocationType, - pub requestedHandleTypes: CUmemAllocationHandleType, - pub location: CUmemLocation, - pub win32HandleMetaData: *mut ::std::os::raw::c_void, - pub allocFlags: CUmemAllocationProp_st__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUmemAllocationProp_st__bindgen_ty_1 { - pub compressionType: ::std::os::raw::c_uchar, - pub gpuDirectRDMACapable: ::std::os::raw::c_uchar, - pub usage: ::std::os::raw::c_ushort, - pub reserved: [::std::os::raw::c_uchar; 4usize], -} -pub type CUmemAllocationProp_v1 = CUmemAllocationProp_st; -pub type CUmemAllocationProp = CUmemAllocationProp_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUmemAccessDesc_st { - pub location: CUmemLocation, - pub flags: CUmemAccess_flags, -} -pub type CUmemAccessDesc_v1 = CUmemAccessDesc_st; -pub type CUmemAccessDesc = CUmemAccessDesc_v1; -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_SUCCESS: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(0); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(1); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(2); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(3); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED: 
CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(4); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(5); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(6); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(7); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUgraphExecUpdateResult_enum(pub ::std::os::raw::c_uint); -pub use self::CUgraphExecUpdateResult_enum as CUgraphExecUpdateResult; -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(1); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(2); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(3); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(4); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(5); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(6); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_USED_MEM_CURRENT: CUmemPool_attribute_enum = - CUmemPool_attribute_enum(7); -} -impl CUmemPool_attribute_enum { - pub const CU_MEMPOOL_ATTR_USED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum(8); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUmemPool_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemPool_attribute_enum as CUmemPool_attribute; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUmemPoolProps_st { - pub allocType: CUmemAllocationType, - pub handleTypes: CUmemAllocationHandleType, - pub location: CUmemLocation, - pub win32SecurityAttributes: *mut ::std::os::raw::c_void, - pub reserved: [::std::os::raw::c_uchar; 64usize], -} -pub type CUmemPoolProps_v1 = CUmemPoolProps_st; -pub type CUmemPoolProps = CUmemPoolProps_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUmemPoolPtrExportData_st { - pub reserved: [::std::os::raw::c_uchar; 64usize], -} -pub type CUmemPoolPtrExportData_v1 = CUmemPoolPtrExportData_st; -pub type CUmemPoolPtrExportData = CUmemPoolPtrExportData_v1; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_MEM_ALLOC_NODE_PARAMS_st { - pub poolProps: CUmemPoolProps, - pub accessDescs: *const CUmemAccessDesc, - pub accessDescCount: usize, - pub bytesize: usize, - pub dptr: CUdeviceptr, -} -pub type CUDA_MEM_ALLOC_NODE_PARAMS = CUDA_MEM_ALLOC_NODE_PARAMS_st; -impl CUgraphMem_attribute_enum { - pub const CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: CUgraphMem_attribute_enum = - CUgraphMem_attribute_enum(0); -} -impl CUgraphMem_attribute_enum { - pub const CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: CUgraphMem_attribute_enum = - CUgraphMem_attribute_enum(1); -} -impl CUgraphMem_attribute_enum { - pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: CUgraphMem_attribute_enum = 
- CUgraphMem_attribute_enum(2); -} -impl CUgraphMem_attribute_enum { - pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: CUgraphMem_attribute_enum = - CUgraphMem_attribute_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUgraphMem_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUgraphMem_attribute_enum as CUgraphMem_attribute; -impl CUflushGPUDirectRDMAWritesScope_enum { - pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER: CUflushGPUDirectRDMAWritesScope_enum = - CUflushGPUDirectRDMAWritesScope_enum(100); -} -impl CUflushGPUDirectRDMAWritesScope_enum { - pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES: CUflushGPUDirectRDMAWritesScope_enum = - CUflushGPUDirectRDMAWritesScope_enum(200); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUflushGPUDirectRDMAWritesScope_enum(pub ::std::os::raw::c_uint); -pub use self::CUflushGPUDirectRDMAWritesScope_enum as CUflushGPUDirectRDMAWritesScope; -impl CUflushGPUDirectRDMAWritesTarget_enum { - pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX: - CUflushGPUDirectRDMAWritesTarget_enum = CUflushGPUDirectRDMAWritesTarget_enum(0); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUflushGPUDirectRDMAWritesTarget_enum(pub ::std::os::raw::c_uint); -pub use self::CUflushGPUDirectRDMAWritesTarget_enum as CUflushGPUDirectRDMAWritesTarget; -extern "C" { - pub fn cuGetErrorString(error: CUresult, pStr: *mut *const ::std::os::raw::c_char) -> CUresult; -} -extern "C" { - pub fn cuGetErrorName(error: CUresult, pStr: *mut *const ::std::os::raw::c_char) -> CUresult; -} -extern "C" { - pub fn cuInit(Flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuDriverGetVersion(driverVersion: *mut ::std::os::raw::c_int) -> CUresult; -} -extern "C" { - pub fn cuDeviceGet(device: *mut CUdevice, ordinal: ::std::os::raw::c_int) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetCount(count: *mut ::std::os::raw::c_int) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetName( - name: *mut ::std::os::raw::c_char, - len: ::std::os::raw::c_int, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetUuid(uuid: *mut CUuuid, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetUuid_v2(uuid: *mut CUuuid, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetLuid( - luid: *mut ::std::os::raw::c_char, - deviceNodeMask: *mut ::std::os::raw::c_uint, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceTotalMem_v2(bytes: *mut usize, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetTexture1DLinearMaxWidth( - maxWidthInElements: *mut usize, - format: CUarray_format, - numChannels: ::std::os::raw::c_uint, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetAttribute( - pi: *mut ::std::os::raw::c_int, - attrib: CUdevice_attribute, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetNvSciSyncAttributes( - nvSciSyncAttrList: *mut ::std::os::raw::c_void, - dev: CUdevice, - flags: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceSetMemPool(dev: CUdevice, pool: CUmemoryPool) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetMemPool(pool: *mut CUmemoryPool, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetDefaultMemPool(pool_out: *mut CUmemoryPool, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuFlushGPUDirectRDMAWrites( - target: CUflushGPUDirectRDMAWritesTarget, - scope: 
CUflushGPUDirectRDMAWritesScope, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetProperties(prop: *mut CUdevprop, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceComputeCapability( - major: *mut ::std::os::raw::c_int, - minor: *mut ::std::os::raw::c_int, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxRetain(pctx: *mut CUcontext, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxRelease_v2(dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxSetFlags_v2(dev: CUdevice, flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxGetState( - dev: CUdevice, - flags: *mut ::std::os::raw::c_uint, - active: *mut ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxReset_v2(dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetExecAffinitySupport( - pi: *mut ::std::os::raw::c_int, - type_: CUexecAffinityType, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxCreate_v2( - pctx: *mut CUcontext, - flags: ::std::os::raw::c_uint, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxCreate_v3( - pctx: *mut CUcontext, - paramsArray: *mut CUexecAffinityParam, - numParams: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxDestroy_v2(ctx: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxPushCurrent_v2(ctx: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxPopCurrent_v2(pctx: *mut CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxSetCurrent(ctx: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxGetCurrent(pctx: *mut CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxGetDevice(device: *mut CUdevice) -> CUresult; -} -extern "C" { - pub fn cuCtxGetFlags(flags: *mut ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuCtxSynchronize() -> CUresult; -} -extern "C" { - pub fn cuCtxSetLimit(limit: CUlimit, value: usize) -> CUresult; -} -extern "C" { - pub fn cuCtxGetLimit(pvalue: *mut usize, limit: CUlimit) -> CUresult; -} -extern "C" { - pub fn cuCtxGetCacheConfig(pconfig: *mut CUfunc_cache) -> CUresult; -} -extern "C" { - pub fn cuCtxSetCacheConfig(config: CUfunc_cache) -> CUresult; -} -extern "C" { - pub fn cuCtxGetSharedMemConfig(pConfig: *mut CUsharedconfig) -> CUresult; -} -extern "C" { - pub fn cuCtxSetSharedMemConfig(config: CUsharedconfig) -> CUresult; -} -extern "C" { - pub fn cuCtxGetApiVersion(ctx: CUcontext, version: *mut ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuCtxGetStreamPriorityRange( - leastPriority: *mut ::std::os::raw::c_int, - greatestPriority: *mut ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxResetPersistingL2Cache() -> CUresult; -} -extern "C" { - pub fn cuCtxGetExecAffinity( - pExecAffinity: *mut CUexecAffinityParam, - type_: CUexecAffinityType, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxAttach(pctx: *mut CUcontext, flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuCtxDetach(ctx: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuModuleLoad(module: *mut CUmodule, fname: *const ::std::os::raw::c_char) -> CUresult; -} -extern "C" { - pub fn cuModuleLoadData( - module: *mut CUmodule, - image: *const ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleLoadDataEx( - module: *mut CUmodule, - image: *const ::std::os::raw::c_void, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - 
optionValues: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleLoadFatBinary( - module: *mut CUmodule, - fatCubin: *const ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleUnload(hmod: CUmodule) -> CUresult; -} -extern "C" { - pub fn cuModuleGetFunction( - hfunc: *mut CUfunction, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleGetGlobal_v2( - dptr: *mut CUdeviceptr, - bytes: *mut usize, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleGetTexRef( - pTexRef: *mut CUtexref, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleGetSurfRef( - pSurfRef: *mut CUsurfref, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkCreate_v2( - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - stateOut: *mut CUlinkState, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkAddData_v2( - state: CUlinkState, - type_: CUjitInputType, - data: *mut ::std::os::raw::c_void, - size: usize, - name: *const ::std::os::raw::c_char, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkAddFile_v2( - state: CUlinkState, - type_: CUjitInputType, - path: *const ::std::os::raw::c_char, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkComplete( - state: CUlinkState, - cubinOut: *mut *mut ::std::os::raw::c_void, - sizeOut: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkDestroy(state: CUlinkState) -> CUresult; -} -extern "C" { - pub fn cuMemGetInfo_v2(free: *mut usize, total: *mut usize) -> CUresult; -} -extern "C" { - pub fn cuMemAlloc_v2(dptr: *mut CUdeviceptr, bytesize: usize) -> CUresult; -} -extern "C" { - pub fn cuMemAllocPitch_v2( - dptr: *mut CUdeviceptr, - pPitch: *mut usize, - WidthInBytes: usize, - Height: usize, - ElementSizeBytes: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemFree_v2(dptr: CUdeviceptr) -> CUresult; -} -extern "C" { - pub fn cuMemGetAddressRange_v2( - pbase: *mut CUdeviceptr, - psize: *mut usize, - dptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuMemAllocHost_v2(pp: *mut *mut ::std::os::raw::c_void, bytesize: usize) -> CUresult; -} -extern "C" { - pub fn cuMemFreeHost(p: *mut ::std::os::raw::c_void) -> CUresult; -} -extern "C" { - pub fn cuMemHostAlloc( - pp: *mut *mut ::std::os::raw::c_void, - bytesize: usize, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemHostGetDevicePointer_v2( - pdptr: *mut CUdeviceptr, - p: *mut ::std::os::raw::c_void, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemHostGetFlags( - pFlags: *mut ::std::os::raw::c_uint, - p: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuMemAllocManaged( - dptr: *mut CUdeviceptr, - bytesize: usize, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetByPCIBusId( - dev: *mut CUdevice, - pciBusId: *const ::std::os::raw::c_char, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetPCIBusId( - pciBusId: *mut ::std::os::raw::c_char, - len: ::std::os::raw::c_int, - dev: CUdevice, - ) -> CUresult; 
-} -extern "C" { - pub fn cuIpcGetEventHandle(pHandle: *mut CUipcEventHandle, event: CUevent) -> CUresult; -} -extern "C" { - pub fn cuIpcOpenEventHandle(phEvent: *mut CUevent, handle: CUipcEventHandle) -> CUresult; -} -extern "C" { - pub fn cuIpcGetMemHandle(pHandle: *mut CUipcMemHandle, dptr: CUdeviceptr) -> CUresult; -} -extern "C" { - pub fn cuIpcOpenMemHandle_v2( - pdptr: *mut CUdeviceptr, - handle: CUipcMemHandle, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuIpcCloseMemHandle(dptr: CUdeviceptr) -> CUresult; -} -extern "C" { - pub fn cuMemHostRegister_v2( - p: *mut ::std::os::raw::c_void, - bytesize: usize, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemHostUnregister(p: *mut ::std::os::raw::c_void) -> CUresult; -} -extern "C" { - pub fn cuMemcpy_ptds(dst: CUdeviceptr, src: CUdeviceptr, ByteCount: usize) -> CUresult; -} -extern "C" { - pub fn cuMemcpyPeer_ptds( - dstDevice: CUdeviceptr, - dstContext: CUcontext, - srcDevice: CUdeviceptr, - srcContext: CUcontext, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoD_v2_ptds( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoH_v2_ptds( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoD_v2_ptds( - dstDevice: CUdeviceptr, - srcDevice: CUdeviceptr, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoA_v2_ptds( - dstArray: CUarray, - dstOffset: usize, - srcDevice: CUdeviceptr, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoD_v2_ptds( - dstDevice: CUdeviceptr, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoA_v2_ptds( - dstArray: CUarray, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoH_v2_ptds( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoA_v2_ptds( - dstArray: CUarray, - dstOffset: usize, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2D_v2_ptds(pCopy: *const CUDA_MEMCPY2D) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2DUnaligned_v2_ptds(pCopy: *const CUDA_MEMCPY2D) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3D_v2_ptds(pCopy: *const CUDA_MEMCPY3D) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DPeer_ptds(pCopy: *const CUDA_MEMCPY3D_PEER) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAsync_ptsz( - dst: CUdeviceptr, - src: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyPeerAsync_ptsz( - dstDevice: CUdeviceptr, - dstContext: CUcontext, - srcDevice: CUdeviceptr, - srcContext: CUcontext, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoDAsync_v2_ptsz( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoHAsync_v2_ptsz( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoDAsync_v2_ptsz( - dstDevice: CUdeviceptr, - srcDevice: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, - ) 
-> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoAAsync_v2_ptsz( - dstArray: CUarray, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoHAsync_v2_ptsz( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2DAsync_v2_ptsz(pCopy: *const CUDA_MEMCPY2D, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DAsync_v2_ptsz(pCopy: *const CUDA_MEMCPY3D, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DPeerAsync_ptsz( - pCopy: *const CUDA_MEMCPY3D_PEER, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD8_v2_ptds( - dstDevice: CUdeviceptr, - uc: ::std::os::raw::c_uchar, - N: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD16_v2_ptds( - dstDevice: CUdeviceptr, - us: ::std::os::raw::c_ushort, - N: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD32_v2_ptds( - dstDevice: CUdeviceptr, - ui: ::std::os::raw::c_uint, - N: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D8_v2_ptds( - dstDevice: CUdeviceptr, - dstPitch: usize, - uc: ::std::os::raw::c_uchar, - Width: usize, - Height: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D16_v2_ptds( - dstDevice: CUdeviceptr, - dstPitch: usize, - us: ::std::os::raw::c_ushort, - Width: usize, - Height: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D32_v2_ptds( - dstDevice: CUdeviceptr, - dstPitch: usize, - ui: ::std::os::raw::c_uint, - Width: usize, - Height: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD8Async_ptsz( - dstDevice: CUdeviceptr, - uc: ::std::os::raw::c_uchar, - N: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD16Async_ptsz( - dstDevice: CUdeviceptr, - us: ::std::os::raw::c_ushort, - N: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD32Async_ptsz( - dstDevice: CUdeviceptr, - ui: ::std::os::raw::c_uint, - N: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D8Async_ptsz( - dstDevice: CUdeviceptr, - dstPitch: usize, - uc: ::std::os::raw::c_uchar, - Width: usize, - Height: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D16Async_ptsz( - dstDevice: CUdeviceptr, - dstPitch: usize, - us: ::std::os::raw::c_ushort, - Width: usize, - Height: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D32Async_ptsz( - dstDevice: CUdeviceptr, - dstPitch: usize, - ui: ::std::os::raw::c_uint, - Width: usize, - Height: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayCreate_v2( - pHandle: *mut CUarray, - pAllocateArray: *const CUDA_ARRAY_DESCRIPTOR, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayGetDescriptor_v2( - pArrayDescriptor: *mut CUDA_ARRAY_DESCRIPTOR, - hArray: CUarray, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayGetSparseProperties( - sparseProperties: *mut CUDA_ARRAY_SPARSE_PROPERTIES, - array: CUarray, - ) -> CUresult; -} -extern "C" { - pub fn cuMipmappedArrayGetSparseProperties( - sparseProperties: *mut CUDA_ARRAY_SPARSE_PROPERTIES, - mipmap: CUmipmappedArray, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayGetPlane( - pPlaneArray: *mut CUarray, - hArray: CUarray, - planeIdx: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayDestroy(hArray: CUarray) -> CUresult; -} -extern "C" { - pub fn 
cuArray3DCreate_v2( - pHandle: *mut CUarray, - pAllocateArray: *const CUDA_ARRAY3D_DESCRIPTOR, - ) -> CUresult; -} -extern "C" { - pub fn cuArray3DGetDescriptor_v2( - pArrayDescriptor: *mut CUDA_ARRAY3D_DESCRIPTOR, - hArray: CUarray, - ) -> CUresult; -} -extern "C" { - pub fn cuMipmappedArrayCreate( - pHandle: *mut CUmipmappedArray, - pMipmappedArrayDesc: *const CUDA_ARRAY3D_DESCRIPTOR, - numMipmapLevels: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMipmappedArrayGetLevel( - pLevelArray: *mut CUarray, - hMipmappedArray: CUmipmappedArray, - level: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMipmappedArrayDestroy(hMipmappedArray: CUmipmappedArray) -> CUresult; -} -extern "C" { - pub fn cuMemAddressReserve( - ptr: *mut CUdeviceptr, - size: usize, - alignment: usize, - addr: CUdeviceptr, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuMemAddressFree(ptr: CUdeviceptr, size: usize) -> CUresult; -} -extern "C" { - pub fn cuMemCreate( - handle: *mut CUmemGenericAllocationHandle, - size: usize, - prop: *const CUmemAllocationProp, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuMemRelease(handle: CUmemGenericAllocationHandle) -> CUresult; -} -extern "C" { - pub fn cuMemMap( - ptr: CUdeviceptr, - size: usize, - offset: usize, - handle: CUmemGenericAllocationHandle, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuMemMapArrayAsync_ptsz( - mapInfoList: *mut CUarrayMapInfo, - count: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemUnmap(ptr: CUdeviceptr, size: usize) -> CUresult; -} -extern "C" { - pub fn cuMemSetAccess( - ptr: CUdeviceptr, - size: usize, - desc: *const CUmemAccessDesc, - count: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemGetAccess( - flags: *mut ::std::os::raw::c_ulonglong, - location: *const CUmemLocation, - ptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuMemExportToShareableHandle( - shareableHandle: *mut ::std::os::raw::c_void, - handle: CUmemGenericAllocationHandle, - handleType: CUmemAllocationHandleType, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuMemImportFromShareableHandle( - handle: *mut CUmemGenericAllocationHandle, - osHandle: *mut ::std::os::raw::c_void, - shHandleType: CUmemAllocationHandleType, - ) -> CUresult; -} -extern "C" { - pub fn cuMemGetAllocationGranularity( - granularity: *mut usize, - prop: *const CUmemAllocationProp, - option: CUmemAllocationGranularity_flags, - ) -> CUresult; -} -extern "C" { - pub fn cuMemGetAllocationPropertiesFromHandle( - prop: *mut CUmemAllocationProp, - handle: CUmemGenericAllocationHandle, - ) -> CUresult; -} -extern "C" { - pub fn cuMemRetainAllocationHandle( - handle: *mut CUmemGenericAllocationHandle, - addr: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuMemFreeAsync_ptsz(dptr: CUdeviceptr, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemAllocAsync_ptsz( - dptr: *mut CUdeviceptr, - bytesize: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolTrimTo(pool: CUmemoryPool, minBytesToKeep: usize) -> CUresult; -} -extern "C" { - pub fn cuMemPoolSetAttribute( - pool: CUmemoryPool, - attr: CUmemPool_attribute, - value: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolGetAttribute( - pool: CUmemoryPool, - attr: CUmemPool_attribute, - value: *mut ::std::os::raw::c_void, - ) -> 
CUresult; -} -extern "C" { - pub fn cuMemPoolSetAccess( - pool: CUmemoryPool, - map: *const CUmemAccessDesc, - count: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolGetAccess( - flags: *mut CUmemAccess_flags, - memPool: CUmemoryPool, - location: *mut CUmemLocation, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolCreate(pool: *mut CUmemoryPool, poolProps: *const CUmemPoolProps) -> CUresult; -} -extern "C" { - pub fn cuMemPoolDestroy(pool: CUmemoryPool) -> CUresult; -} -extern "C" { - pub fn cuMemAllocFromPoolAsync_ptsz( - dptr: *mut CUdeviceptr, - bytesize: usize, - pool: CUmemoryPool, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolExportToShareableHandle( - handle_out: *mut ::std::os::raw::c_void, - pool: CUmemoryPool, - handleType: CUmemAllocationHandleType, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolImportFromShareableHandle( - pool_out: *mut CUmemoryPool, - handle: *mut ::std::os::raw::c_void, - handleType: CUmemAllocationHandleType, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolExportPointer( - shareData_out: *mut CUmemPoolPtrExportData, - ptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPoolImportPointer( - ptr_out: *mut CUdeviceptr, - pool: CUmemoryPool, - shareData: *mut CUmemPoolPtrExportData, - ) -> CUresult; -} -extern "C" { - pub fn cuPointerGetAttribute( - data: *mut ::std::os::raw::c_void, - attribute: CUpointer_attribute, - ptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPrefetchAsync_ptsz( - devPtr: CUdeviceptr, - count: usize, - dstDevice: CUdevice, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemAdvise( - devPtr: CUdeviceptr, - count: usize, - advice: CUmem_advise, - device: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuMemRangeGetAttribute( - data: *mut ::std::os::raw::c_void, - dataSize: usize, - attribute: CUmem_range_attribute, - devPtr: CUdeviceptr, - count: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemRangeGetAttributes( - data: *mut *mut ::std::os::raw::c_void, - dataSizes: *mut usize, - attributes: *mut CUmem_range_attribute, - numAttributes: usize, - devPtr: CUdeviceptr, - count: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuPointerSetAttribute( - value: *const ::std::os::raw::c_void, - attribute: CUpointer_attribute, - ptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuPointerGetAttributes( - numAttributes: ::std::os::raw::c_uint, - attributes: *mut CUpointer_attribute, - data: *mut *mut ::std::os::raw::c_void, - ptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamCreate(phStream: *mut CUstream, Flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuStreamCreateWithPriority( - phStream: *mut CUstream, - flags: ::std::os::raw::c_uint, - priority: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetPriority_ptsz( - hStream: CUstream, - priority: *mut ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetFlags_ptsz(hStream: CUstream, flags: *mut ::std::os::raw::c_uint) - -> CUresult; -} -extern "C" { - pub fn cuStreamGetCtx_ptsz(hStream: CUstream, pctx: *mut CUcontext) -> CUresult; -} -extern "C" { - pub fn cuStreamWaitEvent_ptsz( - hStream: CUstream, - hEvent: CUevent, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamAddCallback_ptsz( - hStream: CUstream, - callback: CUstreamCallback, - userData: *mut ::std::os::raw::c_void, - 
flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamBeginCapture_v2_ptsz(hStream: CUstream, mode: CUstreamCaptureMode) -> CUresult; -} -extern "C" { - pub fn cuThreadExchangeStreamCaptureMode(mode: *mut CUstreamCaptureMode) -> CUresult; -} -extern "C" { - pub fn cuStreamEndCapture_ptsz(hStream: CUstream, phGraph: *mut CUgraph) -> CUresult; -} -extern "C" { - pub fn cuStreamIsCapturing_ptsz( - hStream: CUstream, - captureStatus: *mut CUstreamCaptureStatus, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetCaptureInfo_ptsz( - hStream: CUstream, - captureStatus_out: *mut CUstreamCaptureStatus, - id_out: *mut cuuint64_t, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetCaptureInfo_v2_ptsz( - hStream: CUstream, - captureStatus_out: *mut CUstreamCaptureStatus, - id_out: *mut cuuint64_t, - graph_out: *mut CUgraph, - dependencies_out: *mut *const CUgraphNode, - numDependencies_out: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamUpdateCaptureDependencies_ptsz( - hStream: CUstream, - dependencies: *mut CUgraphNode, - numDependencies: usize, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamAttachMemAsync_ptsz( - hStream: CUstream, - dptr: CUdeviceptr, - length: usize, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamQuery_ptsz(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamSynchronize_ptsz(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamDestroy_v2(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamCopyAttributes_ptsz(dst: CUstream, src: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamGetAttribute_ptsz( - hStream: CUstream, - attr: CUstreamAttrID, - value_out: *mut CUstreamAttrValue, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamSetAttribute_ptsz( - hStream: CUstream, - attr: CUstreamAttrID, - value: *const CUstreamAttrValue, - ) -> CUresult; -} -extern "C" { - pub fn cuEventCreate(phEvent: *mut CUevent, Flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuEventRecord_ptsz(hEvent: CUevent, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuEventRecordWithFlags_ptsz( - hEvent: CUevent, - hStream: CUstream, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuEventQuery(hEvent: CUevent) -> CUresult; -} -extern "C" { - pub fn cuEventSynchronize(hEvent: CUevent) -> CUresult; -} -extern "C" { - pub fn cuEventDestroy_v2(hEvent: CUevent) -> CUresult; -} -extern "C" { - pub fn cuEventElapsedTime(pMilliseconds: *mut f32, hStart: CUevent, hEnd: CUevent) -> CUresult; -} -extern "C" { - pub fn cuImportExternalMemory( - extMem_out: *mut CUexternalMemory, - memHandleDesc: *const CUDA_EXTERNAL_MEMORY_HANDLE_DESC, - ) -> CUresult; -} -extern "C" { - pub fn cuExternalMemoryGetMappedBuffer( - devPtr: *mut CUdeviceptr, - extMem: CUexternalMemory, - bufferDesc: *const CUDA_EXTERNAL_MEMORY_BUFFER_DESC, - ) -> CUresult; -} -extern "C" { - pub fn cuExternalMemoryGetMappedMipmappedArray( - mipmap: *mut CUmipmappedArray, - extMem: CUexternalMemory, - mipmapDesc: *const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC, - ) -> CUresult; -} -extern "C" { - pub fn cuDestroyExternalMemory(extMem: CUexternalMemory) -> CUresult; -} -extern "C" { - pub fn cuImportExternalSemaphore( - extSem_out: *mut CUexternalSemaphore, - semHandleDesc: *const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC, - ) -> CUresult; -} -extern "C" { - pub fn cuSignalExternalSemaphoresAsync_ptsz( - extSemArray: *const 
CUexternalSemaphore, - paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, - numExtSems: ::std::os::raw::c_uint, - stream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuWaitExternalSemaphoresAsync_ptsz( - extSemArray: *const CUexternalSemaphore, - paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, - numExtSems: ::std::os::raw::c_uint, - stream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuDestroyExternalSemaphore(extSem: CUexternalSemaphore) -> CUresult; -} -extern "C" { - pub fn cuStreamWaitValue32_ptsz( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint32_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWaitValue64_ptsz( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint64_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWriteValue32_ptsz( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint32_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWriteValue64_ptsz( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint64_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamBatchMemOp_ptsz( - stream: CUstream, - count: ::std::os::raw::c_uint, - paramArray: *mut CUstreamBatchMemOpParams, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuFuncGetAttribute( - pi: *mut ::std::os::raw::c_int, - attrib: CUfunction_attribute, - hfunc: CUfunction, - ) -> CUresult; -} -extern "C" { - pub fn cuFuncSetAttribute( - hfunc: CUfunction, - attrib: CUfunction_attribute, - value: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuFuncSetCacheConfig(hfunc: CUfunction, config: CUfunc_cache) -> CUresult; -} -extern "C" { - pub fn cuFuncSetSharedMemConfig(hfunc: CUfunction, config: CUsharedconfig) -> CUresult; -} -extern "C" { - pub fn cuFuncGetModule(hmod: *mut CUmodule, hfunc: CUfunction) -> CUresult; -} -extern "C" { - pub fn cuLaunchKernel_ptsz( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, - extra: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchCooperativeKernel_ptsz( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchCooperativeKernelMultiDevice( - launchParamsList: *mut CUDA_LAUNCH_PARAMS, - numDevices: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchHostFunc_ptsz( - hStream: CUstream, - fn_: CUhostFn, - userData: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuFuncSetBlockShape( - hfunc: CUfunction, - x: ::std::os::raw::c_int, - y: ::std::os::raw::c_int, - z: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuFuncSetSharedSize(hfunc: CUfunction, bytes: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuParamSetSize(hfunc: CUfunction, numbytes: ::std::os::raw::c_uint) -> 
CUresult; -} -extern "C" { - pub fn cuParamSeti( - hfunc: CUfunction, - offset: ::std::os::raw::c_int, - value: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuParamSetf(hfunc: CUfunction, offset: ::std::os::raw::c_int, value: f32) -> CUresult; -} -extern "C" { - pub fn cuParamSetv( - hfunc: CUfunction, - offset: ::std::os::raw::c_int, - ptr: *mut ::std::os::raw::c_void, - numbytes: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunch(f: CUfunction) -> CUresult; -} -extern "C" { - pub fn cuLaunchGrid( - f: CUfunction, - grid_width: ::std::os::raw::c_int, - grid_height: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchGridAsync( - f: CUfunction, - grid_width: ::std::os::raw::c_int, - grid_height: ::std::os::raw::c_int, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuParamSetTexRef( - hfunc: CUfunction, - texunit: ::std::os::raw::c_int, - hTexRef: CUtexref, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphCreate(phGraph: *mut CUgraph, flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuGraphAddKernelNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *const CUDA_KERNEL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphKernelNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_KERNEL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphKernelNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_KERNEL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddMemcpyNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - copyParams: *const CUDA_MEMCPY3D, - ctx: CUcontext, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphMemcpyNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_MEMCPY3D, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphMemcpyNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_MEMCPY3D, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddMemsetNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - memsetParams: *const CUDA_MEMSET_NODE_PARAMS, - ctx: CUcontext, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphMemsetNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_MEMSET_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphMemsetNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_MEMSET_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddHostNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *const CUDA_HOST_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphHostNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_HOST_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphHostNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_HOST_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddChildGraphNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - childGraph: CUgraph, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphChildGraphNodeGetGraph(hNode: CUgraphNode, phGraph: *mut CUgraph) -> CUresult; -} -extern "C" { - pub fn cuGraphAddEmptyNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: 
usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddEventRecordNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - event: CUevent, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphEventRecordNodeGetEvent(hNode: CUgraphNode, event_out: *mut CUevent) -> CUresult; -} -extern "C" { - pub fn cuGraphEventRecordNodeSetEvent(hNode: CUgraphNode, event: CUevent) -> CUresult; -} -extern "C" { - pub fn cuGraphAddEventWaitNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - event: CUevent, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphEventWaitNodeGetEvent(hNode: CUgraphNode, event_out: *mut CUevent) -> CUresult; -} -extern "C" { - pub fn cuGraphEventWaitNodeSetEvent(hNode: CUgraphNode, event: CUevent) -> CUresult; -} -extern "C" { - pub fn cuGraphAddExternalSemaphoresSignalNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExternalSemaphoresSignalNodeGetParams( - hNode: CUgraphNode, - params_out: *mut CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExternalSemaphoresSignalNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddExternalSemaphoresWaitNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *const CUDA_EXT_SEM_WAIT_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExternalSemaphoresWaitNodeGetParams( - hNode: CUgraphNode, - params_out: *mut CUDA_EXT_SEM_WAIT_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExternalSemaphoresWaitNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_EXT_SEM_WAIT_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddMemAllocNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *mut CUDA_MEM_ALLOC_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphMemAllocNodeGetParams( - hNode: CUgraphNode, - params_out: *mut CUDA_MEM_ALLOC_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddMemFreeNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - dptr: CUdeviceptr, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphMemFreeNodeGetParams(hNode: CUgraphNode, dptr_out: *mut CUdeviceptr) -> CUresult; -} -extern "C" { - pub fn cuDeviceGraphMemTrim(device: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetGraphMemAttribute( - device: CUdevice, - attr: CUgraphMem_attribute, - value: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceSetGraphMemAttribute( - device: CUdevice, - attr: CUgraphMem_attribute, - value: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphClone(phGraphClone: *mut CUgraph, originalGraph: CUgraph) -> CUresult; -} -extern "C" { - pub fn cuGraphNodeFindInClone( - phNode: *mut CUgraphNode, - hOriginalNode: CUgraphNode, - hClonedGraph: CUgraph, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphNodeGetType(hNode: CUgraphNode, type_: *mut CUgraphNodeType) -> CUresult; -} -extern "C" { - pub fn cuGraphGetNodes( - hGraph: CUgraph, - nodes: *mut CUgraphNode, - 
numNodes: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphGetRootNodes( - hGraph: CUgraph, - rootNodes: *mut CUgraphNode, - numRootNodes: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphGetEdges( - hGraph: CUgraph, - from: *mut CUgraphNode, - to: *mut CUgraphNode, - numEdges: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphNodeGetDependencies( - hNode: CUgraphNode, - dependencies: *mut CUgraphNode, - numDependencies: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphNodeGetDependentNodes( - hNode: CUgraphNode, - dependentNodes: *mut CUgraphNode, - numDependentNodes: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphAddDependencies( - hGraph: CUgraph, - from: *const CUgraphNode, - to: *const CUgraphNode, - numDependencies: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphRemoveDependencies( - hGraph: CUgraph, - from: *const CUgraphNode, - to: *const CUgraphNode, - numDependencies: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphDestroyNode(hNode: CUgraphNode) -> CUresult; -} -extern "C" { - pub fn cuGraphInstantiate_v2( - phGraphExec: *mut CUgraphExec, - hGraph: CUgraph, - phErrorNode: *mut CUgraphNode, - logBuffer: *mut ::std::os::raw::c_char, - bufferSize: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphInstantiateWithFlags( - phGraphExec: *mut CUgraphExec, - hGraph: CUgraph, - flags: ::std::os::raw::c_ulonglong, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecKernelNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - nodeParams: *const CUDA_KERNEL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecMemcpyNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - copyParams: *const CUDA_MEMCPY3D, - ctx: CUcontext, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecMemsetNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - memsetParams: *const CUDA_MEMSET_NODE_PARAMS, - ctx: CUcontext, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecHostNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - nodeParams: *const CUDA_HOST_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecChildGraphNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - childGraph: CUgraph, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecEventRecordNodeSetEvent( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - event: CUevent, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecEventWaitNodeSetEvent( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - event: CUevent, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecExternalSemaphoresSignalNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - nodeParams: *const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphExecExternalSemaphoresWaitNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - nodeParams: *const CUDA_EXT_SEM_WAIT_NODE_PARAMS, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphUpload_ptsz(hGraphExec: CUgraphExec, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuGraphLaunch_ptsz(hGraphExec: CUgraphExec, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuGraphExecDestroy(hGraphExec: CUgraphExec) -> CUresult; -} -extern "C" { - pub fn cuGraphDestroy(hGraph: CUgraph) -> CUresult; -} -extern "C" { - pub fn cuGraphExecUpdate( - hGraphExec: CUgraphExec, - hGraph: CUgraph, - hErrorNode_out: *mut CUgraphNode, - updateResult_out: *mut CUgraphExecUpdateResult, - ) -> CUresult; -} -extern "C" { - pub 
fn cuGraphKernelNodeCopyAttributes(dst: CUgraphNode, src: CUgraphNode) -> CUresult; -} -extern "C" { - pub fn cuGraphKernelNodeGetAttribute( - hNode: CUgraphNode, - attr: CUkernelNodeAttrID, - value_out: *mut CUkernelNodeAttrValue, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphKernelNodeSetAttribute( - hNode: CUgraphNode, - attr: CUkernelNodeAttrID, - value: *const CUkernelNodeAttrValue, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphDebugDotPrint( - hGraph: CUgraph, - path: *const ::std::os::raw::c_char, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuUserObjectCreate( - object_out: *mut CUuserObject, - ptr: *mut ::std::os::raw::c_void, - destroy: CUhostFn, - initialRefcount: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuUserObjectRetain(object: CUuserObject, count: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuUserObjectRelease(object: CUuserObject, count: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuGraphRetainUserObject( - graph: CUgraph, - object: CUuserObject, - count: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphReleaseUserObject( - graph: CUgraph, - object: CUuserObject, - count: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuOccupancyMaxActiveBlocksPerMultiprocessor( - numBlocks: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSize: ::std::os::raw::c_int, - dynamicSMemSize: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( - numBlocks: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSize: ::std::os::raw::c_int, - dynamicSMemSize: usize, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuOccupancyMaxPotentialBlockSize( - minGridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSizeToDynamicSMemSize: CUoccupancyB2DSize, - dynamicSMemSize: usize, - blockSizeLimit: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuOccupancyMaxPotentialBlockSizeWithFlags( - minGridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSizeToDynamicSMemSize: CUoccupancyB2DSize, - dynamicSMemSize: usize, - blockSizeLimit: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuOccupancyAvailableDynamicSMemPerBlock( - dynamicSmemSize: *mut usize, - func: CUfunction, - numBlocks: ::std::os::raw::c_int, - blockSize: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetArray( - hTexRef: CUtexref, - hArray: CUarray, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetMipmappedArray( - hTexRef: CUtexref, - hMipmappedArray: CUmipmappedArray, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetAddress_v2( - ByteOffset: *mut usize, - hTexRef: CUtexref, - dptr: CUdeviceptr, - bytes: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetAddress2D_v3( - hTexRef: CUtexref, - desc: *const CUDA_ARRAY_DESCRIPTOR, - dptr: CUdeviceptr, - Pitch: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetFormat( - hTexRef: CUtexref, - fmt: CUarray_format, - NumPackedComponents: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetAddressMode( - hTexRef: CUtexref, - dim: ::std::os::raw::c_int, - am: CUaddress_mode, - ) -> CUresult; -} 
-extern "C" { - pub fn cuTexRefSetFilterMode(hTexRef: CUtexref, fm: CUfilter_mode) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetMipmapFilterMode(hTexRef: CUtexref, fm: CUfilter_mode) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetMipmapLevelBias(hTexRef: CUtexref, bias: f32) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetMipmapLevelClamp( - hTexRef: CUtexref, - minMipmapLevelClamp: f32, - maxMipmapLevelClamp: f32, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetMaxAnisotropy( - hTexRef: CUtexref, - maxAniso: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetBorderColor(hTexRef: CUtexref, pBorderColor: *mut f32) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetFlags(hTexRef: CUtexref, Flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetAddress_v2(pdptr: *mut CUdeviceptr, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetArray(phArray: *mut CUarray, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetMipmappedArray( - phMipmappedArray: *mut CUmipmappedArray, - hTexRef: CUtexref, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetAddressMode( - pam: *mut CUaddress_mode, - hTexRef: CUtexref, - dim: ::std::os::raw::c_int, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetFilterMode(pfm: *mut CUfilter_mode, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetFormat( - pFormat: *mut CUarray_format, - pNumChannels: *mut ::std::os::raw::c_int, - hTexRef: CUtexref, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetMipmapFilterMode(pfm: *mut CUfilter_mode, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetMipmapLevelBias(pbias: *mut f32, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetMipmapLevelClamp( - pminMipmapLevelClamp: *mut f32, - pmaxMipmapLevelClamp: *mut f32, - hTexRef: CUtexref, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetMaxAnisotropy( - pmaxAniso: *mut ::std::os::raw::c_int, - hTexRef: CUtexref, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetBorderColor(pBorderColor: *mut f32, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetFlags(pFlags: *mut ::std::os::raw::c_uint, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefCreate(pTexRef: *mut CUtexref) -> CUresult; -} -extern "C" { - pub fn cuTexRefDestroy(hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuSurfRefSetArray( - hSurfRef: CUsurfref, - hArray: CUarray, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuSurfRefGetArray(phArray: *mut CUarray, hSurfRef: CUsurfref) -> CUresult; -} -extern "C" { - pub fn cuTexObjectCreate( - pTexObject: *mut CUtexObject, - pResDesc: *const CUDA_RESOURCE_DESC, - pTexDesc: *const CUDA_TEXTURE_DESC, - pResViewDesc: *const CUDA_RESOURCE_VIEW_DESC, - ) -> CUresult; -} -extern "C" { - pub fn cuTexObjectDestroy(texObject: CUtexObject) -> CUresult; -} -extern "C" { - pub fn cuTexObjectGetResourceDesc( - pResDesc: *mut CUDA_RESOURCE_DESC, - texObject: CUtexObject, - ) -> CUresult; -} -extern "C" { - pub fn cuTexObjectGetTextureDesc( - pTexDesc: *mut CUDA_TEXTURE_DESC, - texObject: CUtexObject, - ) -> CUresult; -} -extern "C" { - pub fn cuTexObjectGetResourceViewDesc( - pResViewDesc: *mut CUDA_RESOURCE_VIEW_DESC, - texObject: CUtexObject, - ) -> CUresult; -} -extern "C" { - pub fn cuSurfObjectCreate( - pSurfObject: *mut CUsurfObject, - pResDesc: *const CUDA_RESOURCE_DESC, - ) -> CUresult; -} -extern "C" { - pub fn cuSurfObjectDestroy(surfObject: 
CUsurfObject) -> CUresult; -} -extern "C" { - pub fn cuSurfObjectGetResourceDesc( - pResDesc: *mut CUDA_RESOURCE_DESC, - surfObject: CUsurfObject, - ) -> CUresult; -} -extern "C" { - pub fn cuDeviceCanAccessPeer( - canAccessPeer: *mut ::std::os::raw::c_int, - dev: CUdevice, - peerDev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxEnablePeerAccess(peerContext: CUcontext, Flags: ::std::os::raw::c_uint) - -> CUresult; -} -extern "C" { - pub fn cuCtxDisablePeerAccess(peerContext: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuDeviceGetP2PAttribute( - value: *mut ::std::os::raw::c_int, - attrib: CUdevice_P2PAttribute, - srcDevice: CUdevice, - dstDevice: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsUnregisterResource(resource: CUgraphicsResource) -> CUresult; -} -extern "C" { - pub fn cuGraphicsSubResourceGetMappedArray( - pArray: *mut CUarray, - resource: CUgraphicsResource, - arrayIndex: ::std::os::raw::c_uint, - mipLevel: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsResourceGetMappedMipmappedArray( - pMipmappedArray: *mut CUmipmappedArray, - resource: CUgraphicsResource, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsResourceGetMappedPointer_v2( - pDevPtr: *mut CUdeviceptr, - pSize: *mut usize, - resource: CUgraphicsResource, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsResourceSetMapFlags_v2( - resource: CUgraphicsResource, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsMapResources_ptsz( - count: ::std::os::raw::c_uint, - resources: *mut CUgraphicsResource, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsUnmapResources_ptsz( - count: ::std::os::raw::c_uint, - resources: *mut CUgraphicsResource, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuGetProcAddress( - symbol: *const ::std::os::raw::c_char, - pfn: *mut *mut ::std::os::raw::c_void, - cudaVersion: ::std::os::raw::c_int, - flags: cuuint64_t, - ) -> CUresult; -} -extern "C" { - pub fn cuGetExportTable( - ppExportTable: *mut *const ::std::os::raw::c_void, - pExportTableId: *const CUuuid, - ) -> CUresult; -} -extern "C" { - pub fn cuMemHostRegister( - p: *mut ::std::os::raw::c_void, - bytesize: usize, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsResourceSetMapFlags( - resource: CUgraphicsResource, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkCreate( - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - stateOut: *mut CUlinkState, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkAddData( - state: CUlinkState, - type_: CUjitInputType, - data: *mut ::std::os::raw::c_void, - size: usize, - name: *const ::std::os::raw::c_char, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuLinkAddFile( - state: CUlinkState, - type_: CUjitInputType, - path: *const ::std::os::raw::c_char, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetAddress2D_v2( - hTexRef: CUtexref, - desc: *const CUDA_ARRAY_DESCRIPTOR, - dptr: CUdeviceptr, - Pitch: usize, - ) -> CUresult; -} -#[repr(transparent)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUdeviceptr_v1(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone, PartialEq, 
Eq)] -pub struct CUDA_MEMCPY2D_v1_st { - pub srcXInBytes: ::std::os::raw::c_uint, - pub srcY: ::std::os::raw::c_uint, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr_v1, - pub srcArray: CUarray, - pub srcPitch: ::std::os::raw::c_uint, - pub dstXInBytes: ::std::os::raw::c_uint, - pub dstY: ::std::os::raw::c_uint, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr_v1, - pub dstArray: CUarray, - pub dstPitch: ::std::os::raw::c_uint, - pub WidthInBytes: ::std::os::raw::c_uint, - pub Height: ::std::os::raw::c_uint, -} -pub type CUDA_MEMCPY2D_v1 = CUDA_MEMCPY2D_v1_st; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_MEMCPY3D_v1_st { - pub srcXInBytes: ::std::os::raw::c_uint, - pub srcY: ::std::os::raw::c_uint, - pub srcZ: ::std::os::raw::c_uint, - pub srcLOD: ::std::os::raw::c_uint, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr_v1, - pub srcArray: CUarray, - pub reserved0: *mut ::std::os::raw::c_void, - pub srcPitch: ::std::os::raw::c_uint, - pub srcHeight: ::std::os::raw::c_uint, - pub dstXInBytes: ::std::os::raw::c_uint, - pub dstY: ::std::os::raw::c_uint, - pub dstZ: ::std::os::raw::c_uint, - pub dstLOD: ::std::os::raw::c_uint, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr_v1, - pub dstArray: CUarray, - pub reserved1: *mut ::std::os::raw::c_void, - pub dstPitch: ::std::os::raw::c_uint, - pub dstHeight: ::std::os::raw::c_uint, - pub WidthInBytes: ::std::os::raw::c_uint, - pub Height: ::std::os::raw::c_uint, - pub Depth: ::std::os::raw::c_uint, -} -pub type CUDA_MEMCPY3D_v1 = CUDA_MEMCPY3D_v1_st; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_ARRAY_DESCRIPTOR_v1_st { - pub Width: ::std::os::raw::c_uint, - pub Height: ::std::os::raw::c_uint, - pub Format: CUarray_format, - pub NumChannels: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY_DESCRIPTOR_v1 = CUDA_ARRAY_DESCRIPTOR_v1_st; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { - pub Width: ::std::os::raw::c_uint, - pub Height: ::std::os::raw::c_uint, - pub Depth: ::std::os::raw::c_uint, - pub Format: CUarray_format, - pub NumChannels: ::std::os::raw::c_uint, - pub Flags: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY3D_DESCRIPTOR_v1 = CUDA_ARRAY3D_DESCRIPTOR_v1_st; -extern "C" { - pub fn cuDeviceTotalMem(bytes: *mut ::std::os::raw::c_uint, dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuCtxCreate( - pctx: *mut CUcontext, - flags: ::std::os::raw::c_uint, - dev: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuModuleGetGlobal( - dptr: *mut CUdeviceptr_v1, - bytes: *mut ::std::os::raw::c_uint, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, - ) -> CUresult; -} -extern "C" { - pub fn cuMemGetInfo( - free: *mut ::std::os::raw::c_uint, - total: *mut ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemAlloc(dptr: *mut CUdeviceptr_v1, bytesize: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuMemAllocPitch( - dptr: *mut CUdeviceptr_v1, - pPitch: *mut ::std::os::raw::c_uint, - WidthInBytes: ::std::os::raw::c_uint, - Height: ::std::os::raw::c_uint, - ElementSizeBytes: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemFree(dptr: CUdeviceptr_v1) -> CUresult; -} -extern "C" { - pub fn cuMemGetAddressRange( - pbase: *mut CUdeviceptr_v1, - psize: *mut 
::std::os::raw::c_uint, - dptr: CUdeviceptr_v1, - ) -> CUresult; -} -extern "C" { - pub fn cuMemAllocHost( - pp: *mut *mut ::std::os::raw::c_void, - bytesize: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemHostGetDevicePointer( - pdptr: *mut CUdeviceptr_v1, - p: *mut ::std::os::raw::c_void, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoD( - dstDevice: CUdeviceptr_v1, - srcHost: *const ::std::os::raw::c_void, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoH( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr_v1, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoD( - dstDevice: CUdeviceptr_v1, - srcDevice: CUdeviceptr_v1, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoA( - dstArray: CUarray, - dstOffset: ::std::os::raw::c_uint, - srcDevice: CUdeviceptr_v1, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoD( - dstDevice: CUdeviceptr_v1, - srcArray: CUarray, - srcOffset: ::std::os::raw::c_uint, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoA( - dstArray: CUarray, - dstOffset: ::std::os::raw::c_uint, - srcHost: *const ::std::os::raw::c_void, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoH( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: ::std::os::raw::c_uint, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoA( - dstArray: CUarray, - dstOffset: ::std::os::raw::c_uint, - srcArray: CUarray, - srcOffset: ::std::os::raw::c_uint, - ByteCount: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoAAsync( - dstArray: CUarray, - dstOffset: ::std::os::raw::c_uint, - srcHost: *const ::std::os::raw::c_void, - ByteCount: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoHAsync( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: ::std::os::raw::c_uint, - ByteCount: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2D(pCopy: *const CUDA_MEMCPY2D_v1) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2DUnaligned(pCopy: *const CUDA_MEMCPY2D_v1) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3D(pCopy: *const CUDA_MEMCPY3D_v1) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoDAsync( - dstDevice: CUdeviceptr_v1, - srcHost: *const ::std::os::raw::c_void, - ByteCount: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoHAsync( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr_v1, - ByteCount: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoDAsync( - dstDevice: CUdeviceptr_v1, - srcDevice: CUdeviceptr_v1, - ByteCount: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2DAsync(pCopy: *const CUDA_MEMCPY2D_v1, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DAsync(pCopy: *const CUDA_MEMCPY3D_v1, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemsetD8( - dstDevice: CUdeviceptr_v1, - uc: ::std::os::raw::c_uchar, - N: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD16( - dstDevice: CUdeviceptr_v1, - us: ::std::os::raw::c_ushort, - N: ::std::os::raw::c_uint, - ) -> 
CUresult; -} -extern "C" { - pub fn cuMemsetD32( - dstDevice: CUdeviceptr_v1, - ui: ::std::os::raw::c_uint, - N: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D8( - dstDevice: CUdeviceptr_v1, - dstPitch: ::std::os::raw::c_uint, - uc: ::std::os::raw::c_uchar, - Width: ::std::os::raw::c_uint, - Height: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D16( - dstDevice: CUdeviceptr_v1, - dstPitch: ::std::os::raw::c_uint, - us: ::std::os::raw::c_ushort, - Width: ::std::os::raw::c_uint, - Height: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D32( - dstDevice: CUdeviceptr_v1, - dstPitch: ::std::os::raw::c_uint, - ui: ::std::os::raw::c_uint, - Width: ::std::os::raw::c_uint, - Height: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayCreate( - pHandle: *mut CUarray, - pAllocateArray: *const CUDA_ARRAY_DESCRIPTOR_v1, - ) -> CUresult; -} -extern "C" { - pub fn cuArrayGetDescriptor( - pArrayDescriptor: *mut CUDA_ARRAY_DESCRIPTOR_v1, - hArray: CUarray, - ) -> CUresult; -} -extern "C" { - pub fn cuArray3DCreate( - pHandle: *mut CUarray, - pAllocateArray: *const CUDA_ARRAY3D_DESCRIPTOR_v1, - ) -> CUresult; -} -extern "C" { - pub fn cuArray3DGetDescriptor( - pArrayDescriptor: *mut CUDA_ARRAY3D_DESCRIPTOR_v1, - hArray: CUarray, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetAddress( - ByteOffset: *mut ::std::os::raw::c_uint, - hTexRef: CUtexref, - dptr: CUdeviceptr_v1, - bytes: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefSetAddress2D( - hTexRef: CUtexref, - desc: *const CUDA_ARRAY_DESCRIPTOR_v1, - dptr: CUdeviceptr_v1, - Pitch: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuTexRefGetAddress(pdptr: *mut CUdeviceptr_v1, hTexRef: CUtexref) -> CUresult; -} -extern "C" { - pub fn cuGraphicsResourceGetMappedPointer( - pDevPtr: *mut CUdeviceptr_v1, - pSize: *mut ::std::os::raw::c_uint, - resource: CUgraphicsResource, - ) -> CUresult; -} -extern "C" { - pub fn cuCtxDestroy(ctx: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxPopCurrent(pctx: *mut CUcontext) -> CUresult; -} -extern "C" { - pub fn cuCtxPushCurrent(ctx: CUcontext) -> CUresult; -} -extern "C" { - pub fn cuStreamDestroy(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuEventDestroy(hEvent: CUevent) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxRelease(dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxReset(dev: CUdevice) -> CUresult; -} -extern "C" { - pub fn cuDevicePrimaryCtxSetFlags(dev: CUdevice, flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoD_v2( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoH_v2( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoD_v2( - dstDevice: CUdeviceptr, - srcDevice: CUdeviceptr, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoA_v2( - dstArray: CUarray, - dstOffset: usize, - srcDevice: CUdeviceptr, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoD_v2( - dstDevice: CUdeviceptr, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoA_v2( - dstArray: CUarray, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub 
fn cuMemcpyAtoH_v2( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoA_v2( - dstArray: CUarray, - dstOffset: usize, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoAAsync_v2( - dstArray: CUarray, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAtoHAsync_v2( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2D_v2(pCopy: *const CUDA_MEMCPY2D) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2DUnaligned_v2(pCopy: *const CUDA_MEMCPY2D) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3D_v2(pCopy: *const CUDA_MEMCPY3D) -> CUresult; -} -extern "C" { - pub fn cuMemcpyHtoDAsync_v2( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoHAsync_v2( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyDtoDAsync_v2( - dstDevice: CUdeviceptr, - srcDevice: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy2DAsync_v2(pCopy: *const CUDA_MEMCPY2D, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DAsync_v2(pCopy: *const CUDA_MEMCPY3D, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemsetD8_v2(dstDevice: CUdeviceptr, uc: ::std::os::raw::c_uchar, N: usize) - -> CUresult; -} -extern "C" { - pub fn cuMemsetD16_v2( - dstDevice: CUdeviceptr, - us: ::std::os::raw::c_ushort, - N: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD32_v2(dstDevice: CUdeviceptr, ui: ::std::os::raw::c_uint, N: usize) - -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D8_v2( - dstDevice: CUdeviceptr, - dstPitch: usize, - uc: ::std::os::raw::c_uchar, - Width: usize, - Height: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D16_v2( - dstDevice: CUdeviceptr, - dstPitch: usize, - us: ::std::os::raw::c_ushort, - Width: usize, - Height: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D32_v2( - dstDevice: CUdeviceptr, - dstPitch: usize, - ui: ::std::os::raw::c_uint, - Width: usize, - Height: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy(dst: CUdeviceptr, src: CUdeviceptr, ByteCount: usize) -> CUresult; -} -extern "C" { - pub fn cuMemcpyAsync( - dst: CUdeviceptr, - src: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyPeer( - dstDevice: CUdeviceptr, - dstContext: CUcontext, - srcDevice: CUdeviceptr, - srcContext: CUcontext, - ByteCount: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpyPeerAsync( - dstDevice: CUdeviceptr, - dstContext: CUcontext, - srcDevice: CUdeviceptr, - srcContext: CUcontext, - ByteCount: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DPeer(pCopy: *const CUDA_MEMCPY3D_PEER) -> CUresult; -} -extern "C" { - pub fn cuMemcpy3DPeerAsync(pCopy: *const CUDA_MEMCPY3D_PEER, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemsetD8Async( - dstDevice: CUdeviceptr, - uc: ::std::os::raw::c_uchar, - N: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD16Async( - dstDevice: 
CUdeviceptr, - us: ::std::os::raw::c_ushort, - N: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD32Async( - dstDevice: CUdeviceptr, - ui: ::std::os::raw::c_uint, - N: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D8Async( - dstDevice: CUdeviceptr, - dstPitch: usize, - uc: ::std::os::raw::c_uchar, - Width: usize, - Height: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D16Async( - dstDevice: CUdeviceptr, - dstPitch: usize, - us: ::std::os::raw::c_ushort, - Width: usize, - Height: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemsetD2D32Async( - dstDevice: CUdeviceptr, - dstPitch: usize, - ui: ::std::os::raw::c_uint, - Width: usize, - Height: usize, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetPriority(hStream: CUstream, priority: *mut ::std::os::raw::c_int) - -> CUresult; -} -extern "C" { - pub fn cuStreamGetFlags(hStream: CUstream, flags: *mut ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuStreamGetCtx(hStream: CUstream, pctx: *mut CUcontext) -> CUresult; -} -extern "C" { - pub fn cuStreamWaitEvent( - hStream: CUstream, - hEvent: CUevent, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamAddCallback( - hStream: CUstream, - callback: CUstreamCallback, - userData: *mut ::std::os::raw::c_void, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamAttachMemAsync( - hStream: CUstream, - dptr: CUdeviceptr, - length: usize, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamQuery(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamSynchronize(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuEventRecord(hEvent: CUevent, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuEventRecordWithFlags( - hEvent: CUevent, - hStream: CUstream, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchKernel( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, - extra: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchHostFunc( - hStream: CUstream, - fn_: CUhostFn, - userData: *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsMapResources( - count: ::std::os::raw::c_uint, - resources: *mut CUgraphicsResource, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsUnmapResources( - count: ::std::os::raw::c_uint, - resources: *mut CUgraphicsResource, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWriteValue32( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint32_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWaitValue32( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint32_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWriteValue64( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint64_t, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamWaitValue64( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint64_t, - flags: ::std::os::raw::c_uint, - ) -> 
CUresult; -} -extern "C" { - pub fn cuStreamBatchMemOp( - stream: CUstream, - count: ::std::os::raw::c_uint, - paramArray: *mut CUstreamBatchMemOpParams, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuMemPrefetchAsync( - devPtr: CUdeviceptr, - count: usize, - dstDevice: CUdevice, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuLaunchCooperativeKernel( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, - ) -> CUresult; -} -extern "C" { - pub fn cuSignalExternalSemaphoresAsync( - extSemArray: *const CUexternalSemaphore, - paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, - numExtSems: ::std::os::raw::c_uint, - stream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuWaitExternalSemaphoresAsync( - extSemArray: *const CUexternalSemaphore, - paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, - numExtSems: ::std::os::raw::c_uint, - stream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamBeginCapture(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamBeginCapture_ptsz(hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamBeginCapture_v2(hStream: CUstream, mode: CUstreamCaptureMode) -> CUresult; -} -extern "C" { - pub fn cuStreamEndCapture(hStream: CUstream, phGraph: *mut CUgraph) -> CUresult; -} -extern "C" { - pub fn cuStreamIsCapturing( - hStream: CUstream, - captureStatus: *mut CUstreamCaptureStatus, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetCaptureInfo( - hStream: CUstream, - captureStatus_out: *mut CUstreamCaptureStatus, - id_out: *mut cuuint64_t, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamGetCaptureInfo_v2( - hStream: CUstream, - captureStatus_out: *mut CUstreamCaptureStatus, - id_out: *mut cuuint64_t, - graph_out: *mut CUgraph, - dependencies_out: *mut *const CUgraphNode, - numDependencies_out: *mut usize, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphUpload(hGraph: CUgraphExec, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuGraphLaunch(hGraph: CUgraphExec, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamCopyAttributes(dstStream: CUstream, srcStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuStreamGetAttribute( - hStream: CUstream, - attr: CUstreamAttrID, - value: *mut CUstreamAttrValue, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamSetAttribute( - hStream: CUstream, - attr: CUstreamAttrID, - param: *const CUstreamAttrValue, - ) -> CUresult; -} -extern "C" { - pub fn cuIpcOpenMemHandle( - pdptr: *mut CUdeviceptr, - handle: CUipcMemHandle, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphInstantiate( - phGraphExec: *mut CUgraphExec, - hGraph: CUgraph, - phErrorNode: *mut CUgraphNode, - logBuffer: *mut ::std::os::raw::c_char, - bufferSize: usize, - ) -> CUresult; -} -extern "C" { - pub fn cuMemMapArrayAsync( - mapInfoList: *mut CUarrayMapInfo, - count: ::std::os::raw::c_uint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuMemFreeAsync(dptr: CUdeviceptr, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemAllocAsync(dptr: *mut CUdeviceptr, bytesize: usize, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuMemAllocFromPoolAsync( - dptr: *mut 
CUdeviceptr, - bytesize: usize, - pool: CUmemoryPool, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuStreamUpdateCaptureDependencies( - hStream: CUstream, - dependencies: *mut CUgraphNode, - numDependencies: usize, - flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -pub type GLenum = ::std::os::raw::c_uint; -pub type GLuint = ::std::os::raw::c_uint; -pub type HGPUNV = *mut ::std::os::raw::c_void; -extern "C" { - pub fn cuGraphicsGLRegisterBuffer( - pCudaResource: *mut CUgraphicsResource, - buffer: GLuint, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuGraphicsGLRegisterImage( - pCudaResource: *mut CUgraphicsResource, - image: GLuint, - target: GLenum, - Flags: ::std::os::raw::c_uint, - ) -> CUresult; -} -extern "C" { - pub fn cuWGLGetDevice(pDevice: *mut CUdevice, hGpu: HGPUNV) -> CUresult; -} -impl CUGLDeviceList_enum { - pub const CU_GL_DEVICE_LIST_ALL: CUGLDeviceList_enum = CUGLDeviceList_enum(1); -} -impl CUGLDeviceList_enum { - pub const CU_GL_DEVICE_LIST_CURRENT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(2); -} -impl CUGLDeviceList_enum { - pub const CU_GL_DEVICE_LIST_NEXT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUGLDeviceList_enum(pub ::std::os::raw::c_uint); -pub use self::CUGLDeviceList_enum as CUGLDeviceList; -extern "C" { - pub fn cuGLGetDevices_v2( - pCudaDeviceCount: *mut ::std::os::raw::c_uint, - pCudaDevices: *mut CUdevice, - cudaDeviceCount: ::std::os::raw::c_uint, - deviceList: CUGLDeviceList, - ) -> CUresult; -} -extern "C" { - pub fn cuGLCtxCreate_v2( - pCtx: *mut CUcontext, - Flags: ::std::os::raw::c_uint, - device: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuGLInit() -> CUresult; -} -extern "C" { - pub fn cuGLRegisterBufferObject(buffer: GLuint) -> CUresult; -} -extern "C" { - pub fn cuGLMapBufferObject_v2_ptds( - dptr: *mut CUdeviceptr, - size: *mut usize, - buffer: GLuint, - ) -> CUresult; -} -extern "C" { - pub fn cuGLUnmapBufferObject(buffer: GLuint) -> CUresult; -} -extern "C" { - pub fn cuGLUnregisterBufferObject(buffer: GLuint) -> CUresult; -} -extern "C" { - pub fn cuGLSetBufferObjectMapFlags(buffer: GLuint, Flags: ::std::os::raw::c_uint) -> CUresult; -} -extern "C" { - pub fn cuGLMapBufferObjectAsync_v2_ptsz( - dptr: *mut CUdeviceptr, - size: *mut usize, - buffer: GLuint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuGLUnmapBufferObjectAsync(buffer: GLuint, hStream: CUstream) -> CUresult; -} -extern "C" { - pub fn cuGLGetDevices( - pCudaDeviceCount: *mut ::std::os::raw::c_uint, - pCudaDevices: *mut CUdevice, - cudaDeviceCount: ::std::os::raw::c_uint, - deviceList: CUGLDeviceList, - ) -> CUresult; -} -extern "C" { - pub fn cuGLMapBufferObject_v2( - dptr: *mut CUdeviceptr, - size: *mut usize, - buffer: GLuint, - ) -> CUresult; -} -extern "C" { - pub fn cuGLMapBufferObjectAsync_v2( - dptr: *mut CUdeviceptr, - size: *mut usize, - buffer: GLuint, - hStream: CUstream, - ) -> CUresult; -} -extern "C" { - pub fn cuGLCtxCreate( - pCtx: *mut CUcontext, - Flags: ::std::os::raw::c_uint, - device: CUdevice, - ) -> CUresult; -} -extern "C" { - pub fn cuGLMapBufferObject( - dptr: *mut CUdeviceptr_v1, - size: *mut ::std::os::raw::c_uint, - buffer: GLuint, - ) -> CUresult; -} -extern "C" { - pub fn cuGLMapBufferObjectAsync( - dptr: *mut CUdeviceptr_v1, - size: *mut ::std::os::raw::c_uint, - buffer: GLuint, - hStream: CUstream, - ) -> CUresult; -} +// Generated automatically by 
zluda_bindgen +// DO NOT EDIT MANUALLY +#![allow(warnings)] +extern "system" { + /** \brief Gets the string description of an error code + + Sets \p *pStr to the address of a NULL-terminated string description + of the error code \p error. + If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE + will be returned and \p *pStr will be set to the NULL address. + + \param error - Error code to convert to string + \param pStr - Address of the string pointer. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::CUresult, + ::cudaGetErrorString*/ + fn cuGetErrorString( + error: cuda_types::CUresult, + pStr: *mut *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Gets the string representation of an error code enum name + + Sets \p *pStr to the address of a NULL-terminated string representation + of the name of the enum error code \p error. + If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE + will be returned and \p *pStr will be set to the NULL address. + + \param error - Error code to convert to string + \param pStr - Address of the string pointer. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::CUresult, + ::cudaGetErrorName*/ + fn cuGetErrorName( + error: cuda_types::CUresult, + pStr: *mut *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Initialize the CUDA driver API + Initializes the driver API and must be called before any other function from + the driver API in the current process. Currently, the \p Flags parameter must be 0. If ::cuInit() + has not been called, any function from the driver API will return + ::CUDA_ERROR_NOT_INITIALIZED. + + \param Flags - Initialization flag for CUDA. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH, + ::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE + \notefnerr*/ + fn cuInit(Flags: ::core::ffi::c_uint) -> cuda_types::CUresult; + /** \brief Returns the latest CUDA version supported by driver + + Returns in \p *driverVersion the version of CUDA supported by + the driver. The version is returned as + (1000 × major + 10 × minor). For example, CUDA 9.2 + would be represented by 9020. + + This function automatically returns ::CUDA_ERROR_INVALID_VALUE if + \p driverVersion is NULL. + + \param driverVersion - Returns the CUDA driver version + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cudaDriverGetVersion, + ::cudaRuntimeGetVersion*/ + fn cuDriverGetVersion( + driverVersion: *mut ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Returns a handle to a compute device + + Returns in \p *device a device handle given an ordinal in the range [0, + ::cuDeviceGetCount()-1]. + + \param device - Returned device handle + \param ordinal - Device number to get handle for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGetLuid, + ::cuDeviceTotalMem, + ::cuDeviceGetExecAffinitySupport*/ + fn cuDeviceGet( + device: *mut cuda_types::CUdevice, + ordinal: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Returns the number of compute-capable devices + + Returns in \p *count the number of devices with compute capability greater + than or equal to 2.0 that are available for execution. 
If there is no such + device, ::cuDeviceGetCount() returns 0. + + \param count - Returned number of compute-capable devices + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGetLuid, + ::cuDeviceGet, + ::cuDeviceTotalMem, + ::cuDeviceGetExecAffinitySupport, + ::cudaGetDeviceCount*/ + fn cuDeviceGetCount(count: *mut ::core::ffi::c_int) -> cuda_types::CUresult; + /** \brief Returns an identifier string for the device + + Returns an ASCII string identifying the device \p dev in the NULL-terminated + string pointed to by \p name. \p len specifies the maximum length of the + string that may be returned. + + \param name - Returned identifier string for the device + \param len - Maximum length of string to store in \p name + \param dev - Device to get identifier string for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetUuid, + ::cuDeviceGetLuid, + ::cuDeviceGetCount, + ::cuDeviceGet, + ::cuDeviceTotalMem, + ::cuDeviceGetExecAffinitySupport, + ::cudaGetDeviceProperties*/ + fn cuDeviceGetName( + name: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Return an UUID for the device + + Note there is a later version of this API, ::cuDeviceGetUuid_v2. It will + supplant this version in 12.0, which is retained for minor version compatibility. + + Returns 16-octets identifying the device \p dev in the structure + pointed by the \p uuid. + + \param uuid - Returned UUID + \param dev - Device to get identifier string for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetUuid_v2 + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetLuid, + ::cuDeviceGet, + ::cuDeviceTotalMem, + ::cuDeviceGetExecAffinitySupport, + ::cudaGetDeviceProperties*/ + fn cuDeviceGetUuid( + uuid: *mut cuda_types::CUuuid, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Return an UUID for the device (11.4+) + + Returns 16-octets identifying the device \p dev in the structure + pointed by the \p uuid. If the device is in MIG mode, returns its + MIG UUID which uniquely identifies the subscribed MIG compute instance. + + \param uuid - Returned UUID + \param dev - Device to get identifier string for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetLuid, + ::cuDeviceGet, + ::cuDeviceTotalMem, + ::cudaGetDeviceProperties*/ + fn cuDeviceGetUuid_v2( + uuid: *mut cuda_types::CUuuid, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Return an LUID and device node mask for the device + + Return identifying information (\p luid and \p deviceNodeMask) to allow + matching device with graphics APIs. 
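// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch, not part of the generated bindings
// or of this diff. It strings together the entry points declared above
// (cuInit, cuDeviceGetCount, cuDeviceGet, cuDeviceGetName), assuming they are
// exposed as callable items and that the process links against a CUDA (or
// ZLUDA) driver. Real callers should compare every returned CUresult against
// CUDA_SUCCESS; the checks are elided here for brevity.
unsafe fn enumerate_devices() {
    let _ = cuInit(0); // Flags must currently be 0
    let mut count: ::core::ffi::c_int = 0;
    let _ = cuDeviceGetCount(&mut count);
    for ordinal in 0..count {
        let mut dev: cuda_types::CUdevice = ::core::mem::zeroed();
        let _ = cuDeviceGet(&mut dev, ordinal);
        let mut name = [0 as ::core::ffi::c_char; 256];
        let _ = cuDeviceGetName(name.as_mut_ptr(), name.len() as ::core::ffi::c_int, dev);
        // `name` now holds a NUL-terminated ASCII identifier for this device.
    }
}
// ---------------------------------------------------------------------------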
+ + \param luid - Returned LUID + \param deviceNodeMask - Returned device node mask + \param dev - Device to get identifier string for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGet, + ::cuDeviceTotalMem, + ::cuDeviceGetExecAffinitySupport, + ::cudaGetDeviceProperties*/ + fn cuDeviceGetLuid( + luid: *mut ::core::ffi::c_char, + deviceNodeMask: *mut ::core::ffi::c_uint, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns the total amount of memory on the device + + Returns in \p *bytes the total amount of memory available on the device + \p dev in bytes. + + \param bytes - Returned memory available on device in bytes + \param dev - Device handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGet, + ::cuDeviceGetExecAffinitySupport, + ::cudaMemGetInfo*/ + fn cuDeviceTotalMem_v2( + bytes: *mut usize, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size. + + Returns in \p maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture + for given \p format and \p numChannels. + + \param maxWidthInElements - Returned maximum number of texture elements allocatable for given \p format and \p numChannels. + \param format - Texture format. + \param numChannels - Number of channels per texture element. + \param dev - Device handle. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGet, + ::cudaMemGetInfo, + ::cuDeviceTotalMem*/ + fn cuDeviceGetTexture1DLinearMaxWidth( + maxWidthInElements: *mut usize, + format: cuda_types::CUarray_format, + numChannels: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns information about the device + + Returns in \p *pi the integer value of the attribute \p attrib on device + \p dev. 
The supported attributes are: + - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: Maximum number of threads per + block; + - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: Maximum x-dimension of a block + - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: Maximum y-dimension of a block + - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: Maximum z-dimension of a block + - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: Maximum x-dimension of a grid + - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: Maximum y-dimension of a grid + - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: Maximum z-dimension of a grid + - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: Maximum amount of + shared memory available to a thread block in bytes + - ::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: Memory available on device for + __constant__ variables in a CUDA C kernel in bytes + - ::CU_DEVICE_ATTRIBUTE_WARP_SIZE: Warp size in threads + - ::CU_DEVICE_ATTRIBUTE_MAX_PITCH: Maximum pitch in bytes allowed by the + memory copy functions that involve memory regions allocated through + ::cuMemAllocPitch() + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: Maximum 1D + texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: Maximum width + for a 1D texture bound to linear memory + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: Maximum + mipmapped 1D texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: Maximum 2D + texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: Maximum 2D + texture height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: Maximum width + for a 2D texture bound to linear memory + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: Maximum height + for a 2D texture bound to linear memory + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: Maximum pitch + in bytes for a 2D texture bound to linear memory + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: Maximum + mipmapped 2D texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: Maximum + mipmapped 2D texture height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: Maximum 3D + texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: Maximum 3D + texture height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: Maximum 3D + texture depth + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: + Alternate maximum 3D texture width, 0 if no alternate + maximum 3D texture size is supported + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: + Alternate maximum 3D texture height, 0 if no alternate + maximum 3D texture size is supported + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: + Alternate maximum 3D texture depth, 0 if no alternate + maximum 3D texture size is supported + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: + Maximum cubemap texture width or height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: + Maximum 1D layered texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: + Maximum layers in a 1D layered texture + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: + Maximum 2D layered texture width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: + Maximum 2D layered texture height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: + Maximum layers in a 2D layered texture + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: + Maximum cubemap layered texture width or height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: + Maximum layers in a cubemap layered texture 
+ - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: + Maximum 1D surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: + Maximum 2D surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: + Maximum 2D surface height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: + Maximum 3D surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: + Maximum 3D surface height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: + Maximum 3D surface depth + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: + Maximum 1D layered surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: + Maximum layers in a 1D layered surface + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: + Maximum 2D layered surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: + Maximum 2D layered surface height + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: + Maximum layers in a 2D layered surface + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: + Maximum cubemap surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: + Maximum cubemap layered surface width + - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: + Maximum layers in a cubemap layered surface + - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: Maximum number of 32-bit + registers available to a thread block + - ::CU_DEVICE_ATTRIBUTE_CLOCK_RATE: The typical clock frequency in kilohertz + - ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: Alignment requirement; texture + base addresses aligned to ::textureAlign bytes do not need an offset + applied to texture fetches + - ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: Pitch alignment requirement + for 2D texture references bound to pitched memory + - ::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: 1 if the device can concurrently copy + memory between host and device while executing a kernel, or 0 if not + - ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: Number of multiprocessors on + the device + - ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: 1 if there is a run time limit + for kernels executed on the device, or 0 if not + - ::CU_DEVICE_ATTRIBUTE_INTEGRATED: 1 if the device is integrated with the + memory subsystem, or 0 if not + - ::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: 1 if the device can map host + memory into the CUDA address space, or 0 if not + - ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: Compute mode that device is currently + in. Available modes are as follows: + - ::CU_COMPUTEMODE_DEFAULT: Default mode - Device is not restricted and + can have multiple CUDA contexts present at a single time. + - ::CU_COMPUTEMODE_PROHIBITED: Compute-prohibited mode - Device is + prohibited from creating new CUDA contexts. + - ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS: Compute-exclusive-process mode - Device + can have only one context used by a single process at a time. + - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: 1 if the device supports + executing multiple kernels within the same context simultaneously, or 0 if + not. It is not guaranteed that multiple kernels will be resident + on the device concurrently so this feature should not be relied upon for + correctness. 
+ - ::CU_DEVICE_ATTRIBUTE_ECC_ENABLED: 1 if error correction is enabled on the + device, 0 if error correction is disabled or not supported by the device + - ::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: PCI bus identifier of the device + - ::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: PCI device (also known as slot) identifier + of the device + - ::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: PCI domain identifier of the device + - ::CU_DEVICE_ATTRIBUTE_TCC_DRIVER: 1 if the device is using a TCC driver. TCC + is only available on Tesla hardware running Windows Vista or later + - ::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: Peak memory clock frequency in kilohertz + - ::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: Global memory bus width in bits + - ::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: Size of L2 cache in bytes. 0 if the device doesn't have L2 cache + - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: Maximum resident threads per multiprocessor + - ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: 1 if the device shares a unified address space with + the host, or 0 if not + - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: Major compute capability version number + - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: Minor compute capability version number + - ::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: 1 if device supports caching globals + in L1 cache, 0 if caching globals in L1 cache is not supported by the device + - ::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: 1 if device supports caching locals + in L1 cache, 0 if caching locals in L1 cache is not supported by the device + - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: Maximum amount of + shared memory available to a multiprocessor in bytes; this amount is shared + by all thread blocks simultaneously resident on a multiprocessor + - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: Maximum number of 32-bit + registers available to a multiprocessor; this number is shared by all thread + blocks simultaneously resident on a multiprocessor + - ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: 1 if device supports allocating managed memory + on this system, 0 if allocating managed memory is not supported by the device on this system. + - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: 1 if device is on a multi-GPU board, 0 if not. + - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: Unique identifier for a group of devices + associated with the same board. Devices on the same multi-GPU board will share the same identifier. + - ::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: 1 if Link between the device and the host + supports native atomic operations. + - ::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: Ratio of single precision performance + (in floating-point operations per second) to double precision performance. + - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: Device supports coherently accessing + pageable memory without calling cudaHostRegister on it. + - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: Device can coherently access managed memory + concurrently with the CPU. + - ::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: Device supports Compute Preemption. + - ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: Device can access host registered + memory at the same virtual address as the CPU. + - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: The maximum per block shared memory size + supported on this device. 
This is the maximum value that can be opted into when using the cuFuncSetAttribute() or cuKernelSetAttribute() call. + For more details see ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES + - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: Device accesses pageable memory via the host's + page tables. + - ::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: The host can directly access managed memory on the device without migration. + - ::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs + - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: Maximum number of thread blocks that can reside on a multiprocessor + - ::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: Device supports compressible memory allocation via ::cuMemCreate + - ::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: Maximum L2 persisting lines capacity setting in bytes + - ::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: Maximum value of CUaccessPolicyWindow::num_bytes + - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate. + - ::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: Amount of shared memory per block reserved by CUDA driver in bytes + - ::CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays. + - ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTERGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU + - ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs + - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum + - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. + - ::CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: Bitmask of handle types supported with mempool based IPC + - ::CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED: Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays. 
+ + \param pi - Returned device attribute value + \param attrib - Device attribute to query + \param dev - Device handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGet, + ::cuDeviceTotalMem, + ::cuDeviceGetExecAffinitySupport, + ::cudaDeviceGetAttribute, + ::cudaGetDeviceProperties*/ + fn cuDeviceGetAttribute( + pi: *mut ::core::ffi::c_int, + attrib: cuda_types::CUdevice_attribute, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Return NvSciSync attributes that this device can support. + + Returns in \p nvSciSyncAttrList, the properties of NvSciSync that + this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList + can be used to create an NvSciSync object that matches this device's capabilities. + + If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is + already set this API will return ::CUDA_ERROR_INVALID_VALUE. + + The applications should set \p nvSciSyncAttrList to a valid + NvSciSyncAttrList failing which this API will return + ::CUDA_ERROR_INVALID_HANDLE. + + The \p flags controls how applications intends to use + the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are: + - ::CUDA_NVSCISYNC_ATTR_SIGNAL, specifies that the applications intends to + signal an NvSciSync on this CUDA device. + - ::CUDA_NVSCISYNC_ATTR_WAIT, specifies that the applications intends to + wait on an NvSciSync on this CUDA device. + + At least one of these flags must be set, failing which the API + returns ::CUDA_ERROR_INVALID_VALUE. Both the flags are orthogonal + to one another: a developer may set both these flags that allows to + set both wait and signal specific attributes in the same \p nvSciSyncAttrList. + + Note that this API updates the input \p nvSciSyncAttrList with values equivalent + to the following public attribute key-values: + NvSciSyncAttrKey_RequiredPerm is set to + - NvSciSyncAccessPerm_SignalOnly if ::CUDA_NVSCISYNC_ATTR_SIGNAL is set in \p flags. + - NvSciSyncAccessPerm_WaitOnly if ::CUDA_NVSCISYNC_ATTR_WAIT is set in \p flags. + - NvSciSyncAccessPerm_WaitSignal if both ::CUDA_NVSCISYNC_ATTR_WAIT and + ::CUDA_NVSCISYNC_ATTR_SIGNAL are set in \p flags. + NvSciSyncAttrKey_PrimitiveInfo is set to + - NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid \p device. + - NvSciSyncAttrValPrimitiveType_Syncpoint if \p device is a Tegra device. + - NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if \p device is GA10X+. + NvSciSyncAttrKey_GpuId is set to the same UUID that is returned for this + \p device from ::cuDeviceGetUuid. + + \param nvSciSyncAttrList - Return NvSciSync attributes supported. + \param dev - Valid Cuda Device to get NvSciSync attributes for. + \param flags - flags describing NvSciSync usage. 
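// Editor's note: illustrative sketch, not part of the generated bindings or of
// this diff. Queries one integer attribute through cuDeviceGetAttribute as
// documented above; it assumes cuda_types::CUdevice_attribute exposes the
// bindgen-style associated constant used below and that `dev` is valid.
unsafe fn multiprocessor_count(dev: cuda_types::CUdevice) -> ::core::ffi::c_int {
    let mut value: ::core::ffi::c_int = 0;
    // Real callers should check the returned CUresult against CUDA_SUCCESS.
    let _ = cuDeviceGetAttribute(
        &mut value,
        cuda_types::CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,
        dev,
    );
    value
}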
+ + \return + + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa + ::cuImportExternalSemaphore, + ::cuDestroyExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuDeviceGetNvSciSyncAttributes( + nvSciSyncAttrList: *mut ::core::ffi::c_void, + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Sets the current memory pool of a device + + The memory pool must be local to the specified device. + ::cuMemAllocAsync allocates from the current mempool of the provided stream's device. + By default, a device's current memory pool is its default memory pool. + + \note Use ::cuMemAllocFromPoolAsync to specify asynchronous allocations from a device different + than the one the stream runs on. + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolDestroy, ::cuMemAllocFromPoolAsync*/ + fn cuDeviceSetMemPool( + dev: cuda_types::CUdevice, + pool: cuda_types::CUmemoryPool, + ) -> cuda_types::CUresult; + /** \brief Gets the current mempool for a device + + Returns the last pool provided to ::cuDeviceSetMemPool for this device + or the device's default memory pool if ::cuDeviceSetMemPool has never been called. + By default the current mempool is the default mempool for a device. + Otherwise the returned pool must have been set with ::cuDeviceSetMemPool. + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate, ::cuDeviceSetMemPool*/ + fn cuDeviceGetMemPool( + pool: *mut cuda_types::CUmemoryPool, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns the default mempool of a device + + The default mempool of a device contains device memory from that device. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuMemAllocAsync, ::cuMemPoolTrimTo, ::cuMemPoolGetAttribute, ::cuMemPoolSetAttribute, cuMemPoolSetAccess, ::cuDeviceGetMemPool, ::cuMemPoolCreate*/ + fn cuDeviceGetDefaultMemPool( + pool_out: *mut cuda_types::CUmemoryPool, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns information about the execution affinity support of the device. + + Returns in \p *pi whether execution affinity type \p type is supported by device \p dev. 
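// Editor's note: illustrative sketch, not part of the generated bindings or of
// this diff. Shows the mempool selection flow documented above: fetch the
// device's default pool and make it current again, so later cuMemAllocAsync
// calls on streams of this device draw from it. Assumes `dev` is valid and
// that real callers check the returned CUresult values.
unsafe fn reset_mempool_to_default(dev: cuda_types::CUdevice) {
    let mut pool: cuda_types::CUmemoryPool = ::core::mem::zeroed();
    let _ = cuDeviceGetDefaultMemPool(&mut pool, dev);
    let _ = cuDeviceSetMemPool(dev, pool);
}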
+ The supported types are: + - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: 1 if context with limited SMs is supported by the device, + or 0 if not; + + \param pi - 1 if the execution affinity type \p type is supported by the device, or 0 if not + \param type - Execution affinity type to query + \param dev - Device handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGet, + ::cuDeviceTotalMem*/ + fn cuDeviceGetExecAffinitySupport( + pi: *mut ::core::ffi::c_int, + type_: cuda_types::CUexecAffinityType, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Blocks until remote writes are visible to the specified scope + + Blocks until GPUDirect RDMA writes to the target context via mappings + created through APIs like nvidia_p2p_get_pages (see + https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are + visible to the specified scope. + + If the scope equals or lies within the scope indicated by + ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, the call + will be a no-op and can be safely omitted for performance. This can be + determined by comparing the numerical values between the two enums, with + smaller scopes having smaller values. + + Users may query support for this API via + ::CU_DEVICE_ATTRIBUTE_FLUSH_FLUSH_GPU_DIRECT_RDMA_OPTIONS. + + \param target - The target of the operation, see ::CUflushGPUDirectRDMAWritesTarget + \param scope - The scope of the operation, see ::CUflushGPUDirectRDMAWritesScope + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr +*/ + fn cuFlushGPUDirectRDMAWrites( + target: cuda_types::CUflushGPUDirectRDMAWritesTarget, + scope: cuda_types::CUflushGPUDirectRDMAWritesScope, + ) -> cuda_types::CUresult; + /** \brief Returns properties for a selected device + + \deprecated + + This function was deprecated as of CUDA 5.0 and replaced by ::cuDeviceGetAttribute(). + + Returns in \p *prop the properties of device \p dev. The ::CUdevprop + structure is defined as: + + \code +typedef struct CUdevprop_st { +int maxThreadsPerBlock; +int maxThreadsDim[3]; +int maxGridSize[3]; +int sharedMemPerBlock; +int totalConstantMemory; +int SIMDWidth; +int memPitch; +int regsPerBlock; +int clockRate; +int textureAlign +} CUdevprop; + \endcode + where: + + - ::maxThreadsPerBlock is the maximum number of threads per block; + - ::maxThreadsDim[3] is the maximum sizes of each dimension of a block; + - ::maxGridSize[3] is the maximum sizes of each dimension of a grid; + - ::sharedMemPerBlock is the total amount of shared memory available per + block in bytes; + - ::totalConstantMemory is the total amount of constant memory available on + the device in bytes; + - ::SIMDWidth is the warp size; + - ::memPitch is the maximum pitch allowed by the memory copy functions that + involve memory regions allocated through ::cuMemAllocPitch(); + - ::regsPerBlock is the total number of registers available per block; + - ::clockRate is the clock frequency in kilohertz; + - ::textureAlign is the alignment requirement; texture base addresses that + are aligned to ::textureAlign bytes do not need an offset applied to + texture fetches. 
+ + \param prop - Returned properties of device + \param dev - Device to get properties for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGet, + ::cuDeviceTotalMem*/ + fn cuDeviceGetProperties( + prop: *mut cuda_types::CUdevprop, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns the compute capability of the device + + \deprecated + + This function was deprecated as of CUDA 5.0 and its functionality superseded + by ::cuDeviceGetAttribute(). + + Returns in \p *major and \p *minor the major and minor revision numbers that + define the compute capability of the device \p dev. + + \param major - Major revision number + \param minor - Minor revision number + \param dev - Device handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGetAttribute, + ::cuDeviceGetCount, + ::cuDeviceGetName, + ::cuDeviceGetUuid, + ::cuDeviceGet, + ::cuDeviceTotalMem*/ + fn cuDeviceComputeCapability( + major: *mut ::core::ffi::c_int, + minor: *mut ::core::ffi::c_int, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Retain the primary context on the GPU + + Retains the primary context on the device. + Once the user successfully retains the primary context, the primary context + will be active and available to the user until the user releases it + with ::cuDevicePrimaryCtxRelease() or resets it with ::cuDevicePrimaryCtxReset(). + Unlike ::cuCtxCreate() the newly retained context is not pushed onto the stack. + + Retaining the primary context for the first time will fail with ::CUDA_ERROR_UNKNOWN + if the compute mode of the device is ::CU_COMPUTEMODE_PROHIBITED. The function + ::cuDeviceGetAttribute() can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to + determine the compute mode of the device. + The nvidia-smi tool can be used to set the compute mode for + devices. Documentation for nvidia-smi can be obtained by passing a + -h option to it. + + Please note that the primary context always supports pinned allocations. Other + flags can be specified by ::cuDevicePrimaryCtxSetFlags(). + + \param pctx - Returned context handle of the new context + \param dev - Device for which primary context is requested + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuDevicePrimaryCtxRelease, + ::cuDevicePrimaryCtxSetFlags, + ::cuCtxCreate, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuDevicePrimaryCtxRetain( + pctx: *mut cuda_types::CUcontext, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Release the primary context on the GPU + + Releases the primary context interop on the device. + A retained context should always be released once the user is done using + it. The context is automatically reset once the last reference to it is + released. 
This behavior is different when the primary context was retained + by the CUDA runtime from CUDA 4.0 and earlier. In this case, the primary + context remains always active. + + Releasing a primary context that has not been previously retained will + fail with ::CUDA_ERROR_INVALID_CONTEXT. + + Please note that unlike ::cuCtxDestroy() this method does not pop the context + from stack in any circumstances. + + \param dev - Device which primary context is released + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuDevicePrimaryCtxRetain, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuDevicePrimaryCtxRelease_v2(dev: cuda_types::CUdevice) -> cuda_types::CUresult; + /** \brief Set flags for the primary context + + Sets the flags for the primary context on the device overwriting perviously + set ones. + + The three LSBs of the \p flags parameter can be used to control how the OS + thread, which owns the CUDA context at the time of an API call, interacts + with the OS scheduler when waiting for results from the GPU. Only one of + the scheduling flags can be set when creating a context. + + - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + results from the GPU. This can decrease latency when waiting for the GPU, + but may lower the performance of CPU threads if they are performing work in + parallel with the CUDA thread. + + - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + results from the GPU. This can increase latency when waiting for the GPU, + but can increase the performance of CPU threads performing work in parallel + with the GPU. + + - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + synchronization primitive when waiting for the GPU to finish work. + + - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + synchronization primitive when waiting for the GPU to finish work.
+ Deprecated: This flag was deprecated as of CUDA 4.0 and was + replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + + - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + uses a heuristic based on the number of active CUDA contexts in the + process \e C and the number of logical processors in the system \e P. If + \e C > \e P, then CUDA will yield to other OS threads when waiting for + the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + for low-powered devices. + + - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + after resizing local memory for a kernel. This can prevent thrashing by + local memory allocations when launching many kernels with high local + memory usage at the cost of potentially increased memory usage.
+ Deprecated: This flag is deprecated and the behavior enabled + by this flag is now the default and cannot be disabled. + + - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + be set during context creation to instruct CUDA to create a coredump if + this context raises an exception during execution. These environment variables + are described in the CUDA-GDB user guide under the "GPU core dump support" + section. + The initial settings will be taken from the global settings at the time of + context creation. The other settings that control coredump output can be + modified by calling ::cuCoredumpSetAttribute from the created context after + it becomes current. + + - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + variables, this flag can be set during context creation to instruct CUDA to + create a coredump if data is written to a certain pipe that is present in the + OS space. These environment variables are described in the CUDA-GDB user + guide under the "GPU core dump support" section. + It is important to note that the pipe name *must* be set with + ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + The initial settings will be taken from the global settings at the time of + context creation. The other settings that control coredump output can be + modified by calling ::cuCoredumpSetAttribute from the created context after + it becomes current. + + - ::CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated + on this context will always synchronize. See further documentation in the + section titled "API Synchronization behavior" to learn more about cases when + synchronous memory operations can exhibit asynchronous behavior. + + \param dev - Device for which the primary context flags are set + \param flags - New flags for the device + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuDevicePrimaryCtxRetain, + ::cuDevicePrimaryCtxGetState, + ::cuCtxCreate, + ::cuCtxGetFlags, + ::cuCtxSetFlags, + ::cudaSetDeviceFlags*/ + fn cuDevicePrimaryCtxSetFlags_v2( + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Get the state of the primary context + + Returns in \p *flags the flags for the primary context of \p dev, and in + \p *active whether it is active. See ::cuDevicePrimaryCtxSetFlags for flag + values. + + \param dev - Device to get primary context flags for + \param flags - Pointer to store flags + \param active - Pointer to store context state; 0 = inactive, 1 = active + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa + ::cuDevicePrimaryCtxSetFlags, + ::cuCtxGetFlags, + ::cuCtxSetFlags, + ::cudaGetDeviceFlags*/ + fn cuDevicePrimaryCtxGetState( + dev: cuda_types::CUdevice, + flags: *mut ::core::ffi::c_uint, + active: *mut ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Destroy all allocations and reset all state on the primary context + + Explicitly destroys and cleans up all resources associated with the current + device in the current process. 
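// Editor's note: illustrative sketch, not part of the generated bindings or of
// this diff. A minimal primary-context lifecycle using the functions declared
// above: set flags, retain, use, release. Assumes `dev` is valid; CUresult
// checks are elided for brevity.
unsafe fn with_primary_context(dev: cuda_types::CUdevice) {
    // 0 selects CU_CTX_SCHED_AUTO per the flag documentation above.
    let _ = cuDevicePrimaryCtxSetFlags_v2(dev, 0);
    let mut ctx: cuda_types::CUcontext = ::core::mem::zeroed();
    let _ = cuDevicePrimaryCtxRetain(&mut ctx, dev);
    // The retained primary context is active but, unlike cuCtxCreate, it is
    // not pushed onto this thread's context stack.
    // ... issue work against `ctx` here ...
    // Every successful retain must be balanced by exactly one release.
    let _ = cuDevicePrimaryCtxRelease_v2(dev);
}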
+ + Note that it is responsibility of the calling function to ensure that no + other module in the process is using the device any more. For that reason + it is recommended to use ::cuDevicePrimaryCtxRelease() in most cases. + However it is safe for other modules to call ::cuDevicePrimaryCtxRelease() + even after resetting the device. + Resetting the primary context does not release it, an application that has + retained the primary context should explicitly release its usage. + + \param dev - Device for which primary context is destroyed + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE + \notefnerr + + \sa ::cuDevicePrimaryCtxRetain, + ::cuDevicePrimaryCtxRelease, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cudaDeviceReset*/ + fn cuDevicePrimaryCtxReset_v2(dev: cuda_types::CUdevice) -> cuda_types::CUresult; + /** \brief Create a CUDA context + + \note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain. + + Creates a new CUDA context and associates it with the calling thread. The + \p flags parameter is described below. The context is created with a usage + count of 1 and the caller of ::cuCtxCreate() must call ::cuCtxDestroy() + when done using the context. If a context is already current to the thread, + it is supplanted by the newly created context and may be restored by a subsequent + call to ::cuCtxPopCurrent(). + + The three LSBs of the \p flags parameter can be used to control how the OS + thread, which owns the CUDA context at the time of an API call, interacts + with the OS scheduler when waiting for results from the GPU. Only one of + the scheduling flags can be set when creating a context. + + - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + results from the GPU. This can decrease latency when waiting for the GPU, + but may lower the performance of CPU threads if they are performing work in + parallel with the CUDA thread. + + - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + results from the GPU. This can increase latency when waiting for the GPU, + but can increase the performance of CPU threads performing work in parallel + with the GPU. + + - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + synchronization primitive when waiting for the GPU to finish work. + + - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + synchronization primitive when waiting for the GPU to finish work.
+ Deprecated: This flag was deprecated as of CUDA 4.0 and was + replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + + - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + uses a heuristic based on the number of active CUDA contexts in the + process \e C and the number of logical processors in the system \e P. If + \e C > \e P, then CUDA will yield to other OS threads when waiting for + the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + for low-powered devices. + + - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. + This flag must be set in order to allocate pinned host memory that is + accessible to the GPU. + + - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + after resizing local memory for a kernel. This can prevent thrashing by + local memory allocations when launching many kernels with high local + memory usage at the cost of potentially increased memory usage.
+ Deprecated: This flag is deprecated and the behavior enabled + by this flag is now the default and cannot be disabled. + Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). + + - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + be set during context creation to instruct CUDA to create a coredump if + this context raises an exception during execution. These environment variables + are described in the CUDA-GDB user guide under the "GPU core dump support" + section. + The initial attributes will be taken from the global attributes at the time of + context creation. The other attributes that control coredump output can be + modified by calling ::cuCoredumpSetAttribute from the created context after + it becomes current. + + - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + variables, this flag can be set during context creation to instruct CUDA to + create a coredump if data is written to a certain pipe that is present in the + OS space. These environment variables are described in the CUDA-GDB user + guide under the "GPU core dump support" section. + It is important to note that the pipe name *must* be set with + ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + The initial attributes will be taken from the global attributes at the time of + context creation. The other attributes that control coredump output can be + modified by calling ::cuCoredumpSetAttribute from the created context after + it becomes current. + Setting this flag on any context creation is equivalent to setting the + ::CU_COREDUMP_ENABLE_USER_TRIGGER attribute to \p true globally. + + - ::CU_CTX_SYNC_MEMOPS: Ensures that synchronous memory operations initiated + on this context will always synchronize. See further documentation in the + section titled "API Synchronization behavior" to learn more about cases when + synchronous memory operations can exhibit asynchronous behavior. + + Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of + the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() + can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the + compute mode of the device. The nvidia-smi tool can be used to set + the compute mode for * devices. + Documentation for nvidia-smi can be obtained by passing a + -h option to it. 
+ + \param pctx - Returned context handle of the new context + \param flags - Context creation flags + \param dev - Device to create context on + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCoredumpSetAttributeGlobal, + ::cuCoredumpSetAttribute, + ::cuCtxSynchronize*/ + fn cuCtxCreate_v2( + pctx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Create a CUDA context with execution affinity + + Creates a new CUDA context with execution affinity and associates it with + the calling thread. The \p paramsArray and \p flags parameter are described below. + The context is created with a usage count of 1 and the caller of ::cuCtxCreate() must + call ::cuCtxDestroy() when done using the context. If a context is already + current to the thread, it is supplanted by the newly created context and may + be restored by a subsequent call to ::cuCtxPopCurrent(). + + The type and the amount of execution resource the context can use is limited by \p paramsArray + and \p numParams. The \p paramsArray is an array of \p CUexecAffinityParam and the \p numParams + describes the size of the array. If two \p CUexecAffinityParam in the array have the same type, + the latter execution affinity parameter overrides the former execution affinity parameter. + The supported execution affinity types are: + - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion + of SMs is specified as the number of SMs via \p CUexecAffinitySmCount. This limit will be internally + rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution + affinity of the context via \p cuCtxGetExecAffinity after context creation. Currently, this attribute + is only supported under Volta+ MPS. + + The three LSBs of the \p flags parameter can be used to control how the OS + thread, which owns the CUDA context at the time of an API call, interacts + with the OS scheduler when waiting for results from the GPU. Only one of + the scheduling flags can be set when creating a context. + + - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + results from the GPU. This can decrease latency when waiting for the GPU, + but may lower the performance of CPU threads if they are performing work in + parallel with the CUDA thread. + + - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + results from the GPU. This can increase latency when waiting for the GPU, + but can increase the performance of CPU threads performing work in parallel + with the GPU. + + - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + synchronization primitive when waiting for the GPU to finish work. + + - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + synchronization primitive when waiting for the GPU to finish work.
+ Deprecated: This flag was deprecated as of CUDA 4.0 and was + replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + + - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + uses a heuristic based on the number of active CUDA contexts in the + process \e C and the number of logical processors in the system \e P. If + \e C > \e P, then CUDA will yield to other OS threads when waiting for + the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + for low-powered devices. + + - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. + This flag must be set in order to allocate pinned host memory that is + accessible to the GPU. + + - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + after resizing local memory for a kernel. This can prevent thrashing by + local memory allocations when launching many kernels with high local + memory usage at the cost of potentially increased memory usage.
+ Deprecated: This flag is deprecated and the behavior enabled + by this flag is now the default and cannot be disabled. + Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). + + - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + be set during context creation to instruct CUDA to create a coredump if + this context raises an exception during execution. These environment variables + are described in the CUDA-GDB user guide under the "GPU core dump support" + section. + The initial attributes will be taken from the global attributes at the time of + context creation. The other attributes that control coredump output can be + modified by calling ::cuCoredumpSetAttribute from the created context after + it becomes current. + + - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + variables, this flag can be set during context creation to instruct CUDA to + create a coredump if data is written to a certain pipe that is present in the + OS space. These environment variables are described in the CUDA-GDB user + guide under the "GPU core dump support" section. + It is important to note that the pipe name *must* be set with + ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + The initial attributes will be taken from the global attributes at the time of + context creation. The other attributes that control coredump output can be + modified by calling ::cuCoredumpSetAttribute from the created context after + it becomes current. + Setting this flag on any context creation is equivalent to setting the + ::CU_COREDUMP_ENABLE_USER_TRIGGER attribute to \p true globally. + + Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of + the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() + can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the + compute mode of the device. The nvidia-smi tool can be used to set + the compute mode for * devices. + Documentation for nvidia-smi can be obtained by passing a + -h option to it. + + \param pctx - Returned context handle of the new context + \param paramsArray - Execution affinity parameters + \param numParams - Number of execution affinity parameters + \param flags - Context creation flags + \param dev - Device to create context on + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cuCoredumpSetAttributeGlobal, + ::cuCoredumpSetAttribute, + ::CUexecAffinityParam*/ + fn cuCtxCreate_v3( + pctx: *mut cuda_types::CUcontext, + paramsArray: *mut cuda_types::CUexecAffinityParam, + numParams: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Destroy a CUDA context + + Destroys the CUDA context specified by \p ctx. 
The context \p ctx will be + destroyed regardless of how many threads it is current to. + It is the responsibility of the calling function to ensure that no API + call issues using \p ctx while ::cuCtxDestroy() is executing. + + Destroys and cleans up all resources associated with the context. + It is the caller's responsibility to ensure that the context or its resources + are not accessed or passed in subsequent API calls and doing so will result in undefined behavior. + These resources include CUDA types such as ::CUmodule, ::CUfunction, ::CUstream, ::CUevent, + ::CUarray, ::CUmipmappedArray, ::CUtexObject, ::CUsurfObject, ::CUtexref, ::CUsurfref, + ::CUgraphicsResource, ::CUlinkState, ::CUexternalMemory and ::CUexternalSemaphore. + + If \p ctx is current to the calling thread then \p ctx will also be + popped from the current thread's context stack (as though ::cuCtxPopCurrent() + were called). If \p ctx is current to other threads, then \p ctx will + remain current to those threads, and attempting to access \p ctx from + those threads will result in the error ::CUDA_ERROR_CONTEXT_IS_DESTROYED. + + \param ctx - Context to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuCtxDestroy_v2(ctx: cuda_types::CUcontext) -> cuda_types::CUresult; + /** \brief Pushes a context on the current CPU thread + + Pushes the given context \p ctx onto the CPU thread's stack of current + contexts. The specified context becomes the CPU thread's current context, so + all CUDA functions that operate on the current context are affected. + + The previous current context may be made current again by calling + ::cuCtxDestroy() or ::cuCtxPopCurrent(). + + \param ctx - Context to push + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuCtxPushCurrent_v2(ctx: cuda_types::CUcontext) -> cuda_types::CUresult; + /** \brief Pops the current CUDA context from the current CPU thread. + + Pops the current CUDA context from the CPU thread and passes back the + old context handle in \p *pctx. That context may then be made current + to a different CPU thread by calling ::cuCtxPushCurrent(). + + If a context was current to the CPU thread before ::cuCtxCreate() or + ::cuCtxPushCurrent() was called, this function makes that context current to + the CPU thread again. 
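+
+ As a rough illustration of the lifecycle these calls imply (assuming the
+ declarations in this file are reachable through an unsafe extern "C" block,
+ written here as a hypothetical cu:: path, that the handle types are the
+ raw-pointer aliases bindgen normally emits, and that dev came from an earlier
+ ::cuDeviceGet call):
+
+ \code
+ let mut ctx: cuda_types::CUcontext = std::ptr::null_mut();
+ unsafe {
+     // Create a context on `dev`; it becomes current to this thread.
+     cu::cuCtxCreate_v2(&mut ctx, 0, dev);
+     // Pop it so another thread could push it and use it there.
+     let mut popped: cuda_types::CUcontext = std::ptr::null_mut();
+     cu::cuCtxPopCurrent_v2(&mut popped);
+     cu::cuCtxPushCurrent_v2(popped);
+     // Destroy once no thread still needs it.
+     cu::cuCtxDestroy_v2(ctx);
+ }
+ \endcode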
+ + \param pctx - Returned popped context handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuCtxPopCurrent_v2(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult; + /** \brief Binds the specified CUDA context to the calling CPU thread + + Binds the specified CUDA context to the calling CPU thread. + If \p ctx is NULL then the CUDA context previously bound to the + calling CPU thread is unbound and ::CUDA_SUCCESS is returned. + + If there exists a CUDA context stack on the calling CPU thread, this + will replace the top of that stack with \p ctx. + If \p ctx is NULL then this will be equivalent to popping the top + of the calling CPU thread's CUDA context stack (or a no-op if the + calling CPU thread's CUDA context stack is empty). + + \param ctx - Context to bind to the calling CPU thread + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa + ::cuCtxGetCurrent, + ::cuCtxCreate, + ::cuCtxDestroy, + ::cudaSetDevice*/ + fn cuCtxSetCurrent(ctx: cuda_types::CUcontext) -> cuda_types::CUresult; + /** \brief Returns the CUDA context bound to the calling CPU thread. + + Returns in \p *pctx the CUDA context bound to the calling CPU thread. + If no context is bound to the calling CPU thread then \p *pctx is + set to NULL and ::CUDA_SUCCESS is returned. + + \param pctx - Returned context handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + \notefnerr + + \sa + ::cuCtxSetCurrent, + ::cuCtxCreate, + ::cuCtxDestroy, + ::cudaGetDevice*/ + fn cuCtxGetCurrent(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult; + /** \brief Returns the device ID for the current context + + Returns in \p *device the ordinal of the current context's device. + + \param device - Returned device ID for the current context + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cudaGetDevice*/ + fn cuCtxGetDevice(device: *mut cuda_types::CUdevice) -> cuda_types::CUresult; + /** \brief Returns the flags for the current context + + Returns in \p *flags the flags of the current context. See ::cuCtxCreate + for flag values. + + \param flags - Pointer to store flags of current context + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetCurrent, + ::cuCtxGetDevice, + ::cuCtxGetLimit, + ::cuCtxGetSharedMemConfig, + ::cuCtxGetStreamPriorityRange, + ::cuCtxSetFlags, + ::cudaGetDeviceFlags*/ + fn cuCtxGetFlags(flags: *mut ::core::ffi::c_uint) -> cuda_types::CUresult; + /** \brief Sets the flags for the current context + + Sets the flags for the current context overwriting previously set ones. 
See + ::cuDevicePrimaryCtxSetFlags for flag values. + + \param flags - Flags to set on the current context + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetCurrent, + ::cuCtxGetDevice, + ::cuCtxGetLimit, + ::cuCtxGetSharedMemConfig, + ::cuCtxGetStreamPriorityRange, + ::cuCtxGetFlags, + ::cudaGetDeviceFlags, + ::cuDevicePrimaryCtxSetFlags,*/ + fn cuCtxSetFlags(flags: ::core::ffi::c_uint) -> cuda_types::CUresult; + /** \brief Returns the unique Id associated with the context supplied + + Returns in \p ctxId the unique Id which is associated with a given context. + The Id is unique for the life of the program for this instance of CUDA. + If context is supplied as NULL and there is one current, the Id of the + current context is returned. + + \param ctx - Context for which to obtain the Id + \param ctxId - Pointer to store the Id of the context + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPushCurrent*/ + fn cuCtxGetId( + ctx: cuda_types::CUcontext, + ctxId: *mut ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Block for a context's tasks to complete + + Blocks until the device has completed all preceding requested tasks. + ::cuCtxSynchronize() returns an error if one of the preceding tasks failed. + If the context was created with the ::CU_CTX_SCHED_BLOCKING_SYNC flag, the + CPU thread will block until the GPU context has finished its work. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cudaDeviceSynchronize*/ + fn cuCtxSynchronize() -> cuda_types::CUresult; + /** \brief Set resource limits + + Setting \p limit to \p value is a request by the application to update + the current limit maintained by the context. The driver is free to + modify the requested value to meet h/w requirements (this could be + clamping to minimum or maximum values, rounding up to nearest element + size, etc). The application can use ::cuCtxGetLimit() to find out exactly + what the limit has been set to. + + Setting each ::CUlimit has its own specific restrictions, so each is + discussed here. + + - ::CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread. + The driver automatically increases the per-thread stack size + for each kernel launch as needed. This size isn't reset back to the + original value after each launch. Setting this value will take effect + immediately, and if necessary, the device will block until all preceding + requested tasks are complete. + + - ::CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used + by the ::printf() device system call. 
Setting ::CU_LIMIT_PRINTF_FIFO_SIZE + must be performed before launching any kernel that uses the ::printf() + device system call, otherwise ::CUDA_ERROR_INVALID_VALUE will be returned. + + - ::CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used + by the ::malloc() and ::free() device system calls. Setting + ::CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel + that uses the ::malloc() or ::free() device system calls, otherwise + ::CUDA_ERROR_INVALID_VALUE will be returned. + + - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of + a grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting + this limit must be performed before any launch of a kernel that uses the + device runtime and calls ::cudaDeviceSynchronize() above the default sync + depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail + with error code ::cudaErrorSyncDepthExceeded if the limitation is + violated. This limit can be set smaller than the default or up the maximum + launch depth of 24. When setting this limit, keep in mind that additional + levels of sync depth require the driver to reserve large amounts of device + memory which can no longer be used for user allocations. If these + reservations of device memory fail, ::cuCtxSetLimit() will return + ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. + This limit is only applicable to devices of compute capability < 9.0. + Attempting to set this limit on devices of other compute capability + versions will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being + returned. + + - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of + outstanding device runtime launches that can be made from the current + context. A grid is outstanding from the point of launch up until the grid + is known to have been completed. Device runtime launches which violate + this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when + ::cudaGetLastError() is called after launch. If more pending launches than + the default (2048 launches) are needed for a module using the device + runtime, this limit can be increased. Keep in mind that being able to + sustain additional pending launches will require the driver to reserve + larger amounts of device memory upfront which can no longer be used for + allocations. If these reservations fail, ::cuCtxSetLimit() will return + ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. + This limit is only applicable to devices of compute capability 3.5 and + higher. Attempting to set this limit on devices of compute capability less + than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being + returned. + + - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity. + Values can range from 0B to 128B. This is purely a performance hint and + it can be ignored or clamped depending on the platform. + + - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE controls size in bytes available for + persisting L2 cache. This is purely a performance hint and it can be + ignored or clamped depending on the platform. 
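+
+ Because the driver may round or clamp the requested value, a common pattern is
+ to set the limit and then read back the applied value with ::cuCtxGetLimit
+ (documented below). A rough sketch through these bindings (cu:: is a
+ hypothetical path to these declarations, and the exact CUlimit constant path
+ is an assumption about how cuda_types exposes the enum):
+
+ \code
+ unsafe {
+     // Ask for an 8 MiB printf FIFO before any kernel that uses printf().
+     cu::cuCtxSetLimit(cuda_types::CUlimit::CU_LIMIT_PRINTF_FIFO_SIZE, 8 * 1024 * 1024);
+     // Read back what the driver actually applied.
+     let mut applied: usize = 0;
+     cu::cuCtxGetLimit(&mut applied, cuda_types::CUlimit::CU_LIMIT_PRINTF_FIFO_SIZE);
+ }
+ \endcode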
+ + \param limit - Limit to set + \param value - Size of limit + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNSUPPORTED_LIMIT, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSynchronize, + ::cudaDeviceSetLimit*/ + fn cuCtxSetLimit(limit: cuda_types::CUlimit, value: usize) -> cuda_types::CUresult; + /** \brief Returns resource limits + + Returns in \p *pvalue the current size of \p limit. The supported + ::CUlimit values are: + - ::CU_LIMIT_STACK_SIZE: stack size in bytes of each GPU thread. + - ::CU_LIMIT_PRINTF_FIFO_SIZE: size in bytes of the FIFO used by the + ::printf() device system call. + - ::CU_LIMIT_MALLOC_HEAP_SIZE: size in bytes of the heap used by the + ::malloc() and ::free() device system calls. + - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: maximum grid depth at which a thread + can issue the device runtime call ::cudaDeviceSynchronize() to wait on + child grid launches to complete. + - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: maximum number of outstanding + device runtime launches that can be made from this context. + - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY: L2 cache fetch granularity. + - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE: Persisting L2 cache size in bytes + + \param limit - Limit to query + \param pvalue - Returned size of limit + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNSUPPORTED_LIMIT + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cudaDeviceGetLimit*/ + fn cuCtxGetLimit( + pvalue: *mut usize, + limit: cuda_types::CUlimit, + ) -> cuda_types::CUresult; + /** \brief Returns the preferred cache configuration for the current context. + + On devices where the L1 cache and shared memory use the same hardware + resources, this function returns through \p pconfig the preferred cache configuration + for the current context. This is only a preference. The driver will use + the requested configuration if possible, but it is free to choose a different + configuration if required to execute functions. + + This will return a \p pconfig of ::CU_FUNC_CACHE_PREFER_NONE on devices + where the size of the L1 cache and shared memory are fixed. 
+ + The supported cache configurations are: + - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + + \param pconfig - Returned cache configuration + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cuFuncSetCacheConfig, + ::cudaDeviceGetCacheConfig*/ + fn cuCtxGetCacheConfig( + pconfig: *mut cuda_types::CUfunc_cache, + ) -> cuda_types::CUresult; + /** \brief Sets the preferred cache configuration for the current context. + + On devices where the L1 cache and shared memory use the same hardware + resources, this sets through \p config the preferred cache configuration for + the current context. This is only a preference. The driver will use + the requested configuration if possible, but it is free to choose a different + configuration if required to execute the function. Any function preference + set via ::cuFuncSetCacheConfig() or ::cuKernelSetCacheConfig() will be preferred over this context-wide + setting. Setting the context-wide cache configuration to + ::CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer + to not change the cache configuration unless required to launch the kernel. + + This setting does nothing on devices where the size of the L1 cache and + shared memory are fixed. + + Launching a kernel with a different preference than the most recent + preference setting may insert a device-side synchronization point. + + The supported cache configurations are: + - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + + \param config - Requested cache configuration + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cuFuncSetCacheConfig, + ::cudaDeviceSetCacheConfig, + ::cuKernelSetCacheConfig*/ + fn cuCtxSetCacheConfig(config: cuda_types::CUfunc_cache) -> cuda_types::CUresult; + /** \brief Gets the context's API version. + + Returns a version number in \p version corresponding to the capabilities of + the context (e.g. 3010 or 3020), which library developers can use to direct + callers to a specific API version. If \p ctx is NULL, returns the API version + used to create the currently bound context. + + Note that new API versions are only introduced when context capabilities are + changed that break binary compatibility, so the API version and driver version + may be different. 
For example, it is valid for the API version to be 3020 while + the driver version is 4020. + + \param ctx - Context to check + \param version - Pointer to version + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuCtxGetApiVersion( + ctx: cuda_types::CUcontext, + version: *mut ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Returns numerical values that correspond to the least and + greatest stream priorities. + + Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond + to the least and greatest stream priorities respectively. Stream priorities + follow a convention where lower numbers imply greater priorities. The range of + meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority]. + If the user attempts to create a stream with a priority value that is + outside the meaningful range as specified by this API, the priority is + automatically clamped down or up to either \p *leastPriority or \p *greatestPriority + respectively. See ::cuStreamCreateWithPriority for details on creating a + priority stream. + A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value + is not desired. + + This function will return '0' in both \p *leastPriority and \p *greatestPriority if + the current context's device does not support stream priorities + (see ::cuDeviceGetAttribute). + + \param leastPriority - Pointer to an int in which the numerical value for least + stream priority is returned + \param greatestPriority - Pointer to an int in which the numerical value for greatest + stream priority is returned + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuStreamCreateWithPriority, + ::cuStreamGetPriority, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cudaDeviceGetStreamPriorityRange*/ + fn cuCtxGetStreamPriorityRange( + leastPriority: *mut ::core::ffi::c_int, + greatestPriority: *mut ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Resets all persisting lines in cache to normal status. + + ::cuCtxResetPersistingL2Cache Resets all persisting lines in cache to normal + status. Takes effect on function return. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuCtxResetPersistingL2Cache() -> cuda_types::CUresult; + /** \brief Returns the execution affinity setting for the current context. + + Returns in \p *pExecAffinity the current value of \p type. The supported + ::CUexecAffinityType values are: + - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: number of SMs the context is limited to use. 
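+
+ A short sketch of querying the granted SM count after creating a context with
+ ::cuCtxCreate_v3 (cu:: is a hypothetical path to these declarations; the enum
+ constant path and the zero-initialised out-parameter are assumptions, the call
+ fills in the struct):
+
+ \code
+ unsafe {
+     let mut affinity: cuda_types::CUexecAffinityParam = std::mem::zeroed();
+     cu::cuCtxGetExecAffinity(
+         &mut affinity,
+         cuda_types::CUexecAffinityType::CU_EXEC_AFFINITY_TYPE_SM_COUNT,
+     );
+     // `affinity` now holds the SM count the driver granted, which may be
+     // rounded up from the value requested at context creation.
+ }
+ \endcode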
+ + \param type - Execution affinity type to query + \param pExecAffinity - Returned execution affinity + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY + \notefnerr + + \sa + ::CUexecAffinityParam*/ + fn cuCtxGetExecAffinity( + pExecAffinity: *mut cuda_types::CUexecAffinityParam, + type_: cuda_types::CUexecAffinityType, + ) -> cuda_types::CUresult; + /** \brief Increment a context's usage-count + + \deprecated + + Note that this function is deprecated and should not be used. + + Increments the usage count of the context and passes back a context handle + in \p *pctx that must be passed to ::cuCtxDetach() when the application is + done with the context. ::cuCtxAttach() fails if there is no context current + to the thread. + + Currently, the \p flags parameter must be 0. + + \param pctx - Returned context handle of the current context + \param flags - Context attach flags (must be 0) + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxDetach, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuCtxAttach( + pctx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Decrement a context's usage-count + + \deprecated + + Note that this function is deprecated and should not be used. + + Decrements the usage count of the context \p ctx, and destroys the context + if the usage count goes to 0. The context must be a handle that was passed + back by ::cuCtxCreate() or ::cuCtxAttach(), and must be current to the + calling thread. + + \param ctx - Context to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetCacheConfig, + ::cuCtxSetLimit, + ::cuCtxSynchronize*/ + fn cuCtxDetach(ctx: cuda_types::CUcontext) -> cuda_types::CUresult; + /** \brief Returns the current shared memory configuration for the current context. + + \deprecated + + This function will return in \p pConfig the current size of shared memory banks + in the current context. On devices with configurable shared memory banks, + ::cuCtxSetSharedMemConfig can be used to change this setting, so that all + subsequent kernel launches will by default use the new bank size. When + ::cuCtxGetSharedMemConfig is called on devices without configurable shared + memory, it will return the fixed bank size of the hardware. + + The returned bank configurations can be either: + - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: shared memory bank width is + four bytes. + - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: shared memory bank width will + eight bytes. 
+ + \param pConfig - returned shared memory configuration + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cuCtxGetSharedMemConfig, + ::cuFuncSetCacheConfig, + ::cudaDeviceGetSharedMemConfig*/ + fn cuCtxGetSharedMemConfig( + pConfig: *mut cuda_types::CUsharedconfig, + ) -> cuda_types::CUresult; + /** \brief Sets the shared memory configuration for the current context. + + \deprecated + + On devices with configurable shared memory banks, this function will set + the context's shared memory bank size which is used for subsequent kernel + launches. + + Changed the shared memory configuration between launches may insert a device + side synchronization point between those launches. + + Changing the shared memory bank size will not increase shared memory usage + or affect occupancy of kernels, but may have major effects on performance. + Larger bank sizes will allow for greater potential bandwidth to shared memory, + but will change what kinds of accesses to shared memory will result in bank + conflicts. + + This function will do nothing on devices with fixed shared memory bank size. + + The supported bank configurations are: + - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: set bank width to the default initial + setting (currently, four bytes). + - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to + be natively four bytes. + - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to + be natively eight bytes. + + \param config - requested shared memory configuration + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, + ::cuCtxDestroy, + ::cuCtxGetApiVersion, + ::cuCtxGetCacheConfig, + ::cuCtxGetDevice, + ::cuCtxGetFlags, + ::cuCtxGetLimit, + ::cuCtxPopCurrent, + ::cuCtxPushCurrent, + ::cuCtxSetLimit, + ::cuCtxSynchronize, + ::cuCtxGetSharedMemConfig, + ::cuFuncSetCacheConfig, + ::cudaDeviceSetSharedMemConfig*/ + fn cuCtxSetSharedMemConfig( + config: cuda_types::CUsharedconfig, + ) -> cuda_types::CUresult; + /** \brief Loads a compute module + + Takes a filename \p fname and loads the corresponding module \p module into + the current context. The CUDA driver API does not attempt to lazily + allocate the resources needed by a module; if the memory for functions and + data (constant and global) needed by the module cannot be allocated, + ::cuModuleLoad() fails. The file should be a \e cubin file as output by + \b nvcc, or a \e PTX file either as output by \b nvcc or handwritten, or + a \e fatbin file as output by \b nvcc from toolchain 4.0 or later. 
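+
+ A minimal load-and-lookup sketch through these bindings (cu:: is a hypothetical
+ path to these declarations; the file and kernel names are placeholders and
+ error handling is omitted):
+
+ \code
+ use std::ffi::CString;
+ let path = CString::new("add.ptx").unwrap();
+ let name = CString::new("add").unwrap();
+ let mut module: cuda_types::CUmodule = std::ptr::null_mut();
+ let mut func: cuda_types::CUfunction = std::ptr::null_mut();
+ unsafe {
+     cu::cuModuleLoad(&mut module, path.as_ptr());
+     cu::cuModuleGetFunction(&mut func, module, name.as_ptr());
+     // ... launch `func` ...
+     cu::cuModuleUnload(module);
+ }
+ \endcode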
+ + \param module - Returned module + \param fname - Filename of module to load + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_NOT_FOUND, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_FILE_NOT_FOUND, + ::CUDA_ERROR_NO_BINARY_FOR_GPU, + ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + \notefnerr + + \sa ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary, + ::cuModuleUnload*/ + fn cuModuleLoad( + module: *mut cuda_types::CUmodule, + fname: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Load a module's data + + Takes a pointer \p image and loads the corresponding module \p module into + the current context. The \p image may be a \e cubin or \e fatbin + as output by \b nvcc, or a NULL-terminated \e PTX, either as output by \b nvcc + or hand-written. + + \param module - Returned module + \param image - Module data to load + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU, + ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + \notefnerr + + \sa ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary, + ::cuModuleUnload*/ + fn cuModuleLoadData( + module: *mut cuda_types::CUmodule, + image: *const ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Load a module's data with options + + Takes a pointer \p image and loads the corresponding module \p module into + the current context. The \p image may be a \e cubin or \e fatbin + as output by \b nvcc, or a NULL-terminated \e PTX, either as output by \b nvcc + or hand-written. + + \param module - Returned module + \param image - Module data to load + \param numOptions - Number of options + \param options - Options for JIT + \param optionValues - Option values for JIT + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU, + ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + \notefnerr + + \sa ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadFatBinary, + ::cuModuleUnload*/ + fn cuModuleLoadDataEx( + module: *mut cuda_types::CUmodule, + image: *const ::core::ffi::c_void, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Load a module's data + + Takes a pointer \p fatCubin and loads the corresponding module \p module + into the current context. 
The pointer represents a fat binary object, + which is a collection of different \e cubin and/or \e PTX files, all + representing the same device code, but compiled and optimized for different + architectures. + + Prior to CUDA 4.0, there was no documented API for constructing and using + fat binary objects by programmers. Starting with CUDA 4.0, fat binary + objects can be constructed by providing the -fatbin option to \b nvcc. + More information can be found in the \b nvcc document. + + \param module - Returned module + \param fatCubin - Fat binary to load + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_NOT_FOUND, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU, + ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + \notefnerr + + \sa ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleUnload*/ + fn cuModuleLoadFatBinary( + module: *mut cuda_types::CUmodule, + fatCubin: *const ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Unloads a module + + Unloads a module \p hmod from the current context. Attempting to unload + a module which was obtained from the Library Management API such as + ::cuLibraryGetModule will return ::CUDA_ERROR_NOT_PERMITTED. + + \param hmod - Module to unload + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_PERMITTED + \notefnerr + \note_destroy_ub + + \sa ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary*/ + fn cuModuleUnload(hmod: cuda_types::CUmodule) -> cuda_types::CUresult; + /** \brief Query lazy loading mode + + Returns lazy loading mode + Module loading mode is controlled by CUDA_MODULE_LOADING env variable + + \param mode - Returns the lazy loading mode + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa + ::cuModuleLoad,*/ + fn cuModuleGetLoadingMode( + mode: *mut cuda_types::CUmoduleLoadingMode, + ) -> cuda_types::CUresult; + /** \brief Returns a function handle + + Returns in \p *hfunc the handle of the function of name \p name located in + module \p hmod. If no function of that name exists, ::cuModuleGetFunction() + returns ::CUDA_ERROR_NOT_FOUND. + + \param hfunc - Returned function handle + \param hmod - Module to retrieve function from + \param name - Name of function to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_FOUND + \notefnerr + + \sa ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary, + ::cuModuleUnload*/ + fn cuModuleGetFunction( + hfunc: *mut cuda_types::CUfunction, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns the number of functions within a module + + Returns in \p count the number of functions in \p mod. 
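+
+ Paired with ::cuModuleEnumerateFunctions, the count lets the caller size the
+ handle buffer before enumerating; a rough sketch (cu:: is a hypothetical path
+ to these declarations and module is an already-loaded ::CUmodule):
+
+ \code
+ unsafe {
+     let mut count: ::core::ffi::c_uint = 0;
+     cu::cuModuleGetFunctionCount(&mut count, module);
+     let mut funcs: Vec<cuda_types::CUfunction> =
+         vec![std::ptr::null_mut(); count as usize];
+     cu::cuModuleEnumerateFunctions(funcs.as_mut_ptr(), count, module);
+ }
+ \endcode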
+ + \param count - Number of functions found within the module + \param mod - Module to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE*/ + fn cuModuleGetFunctionCount( + count: *mut ::core::ffi::c_uint, + mod_: cuda_types::CUmodule, + ) -> cuda_types::CUresult; + /** \brief Returns the function handles within a module. + + Returns in \p functions a maximum number of \p numFunctions function handles within \p mod. When + function loading mode is set to LAZY the function retrieved may be partially loaded. The loading + state of a function can be queried using ::cuFunctionIsLoaded. CUDA APIs may load the function + automatically when called with partially loaded function handle which may incur additional + latency. Alternatively, ::cuFunctionLoad can be used to explicitly load a function. The returned + function handles become invalid when the module is unloaded. + + \param functions - Buffer where the function handles are returned to + \param numFunctions - Maximum number of function handles may be returned to the buffer + \param mod - Module to query from + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuModuleGetFunction, + ::cuModuleGetFunctionCount, + ::cuFuncIsLoaded, + ::cuFuncLoad*/ + fn cuModuleEnumerateFunctions( + functions: *mut cuda_types::CUfunction, + numFunctions: ::core::ffi::c_uint, + mod_: cuda_types::CUmodule, + ) -> cuda_types::CUresult; + /** \brief Returns a global pointer from a module + + Returns in \p *dptr and \p *bytes the base pointer and size of the + global of name \p name located in module \p hmod. If no variable of that name + exists, ::cuModuleGetGlobal() returns ::CUDA_ERROR_NOT_FOUND. + One of the parameters \p dptr or \p bytes (not both) can be NULL in which + case it is ignored. + + \param dptr - Returned global device pointer + \param bytes - Returned global size in bytes + \param hmod - Module to retrieve global from + \param name - Name of global to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_FOUND + \notefnerr + + \sa ::cuModuleGetFunction, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary, + ::cuModuleUnload, + ::cudaGetSymbolAddress, + ::cudaGetSymbolSize*/ + fn cuModuleGetGlobal_v2( + dptr: *mut cuda_types::CUdeviceptr, + bytes: *mut usize, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Creates a pending JIT linker invocation. + + If the call is successful, the caller owns the returned CUlinkState, which + should eventually be destroyed with ::cuLinkDestroy. The + device code machine size (32 or 64 bit) will match the calling application. + + Both linker and compiler options may be specified. Compiler options will + be applied to inputs to this linker action which must be compiled from PTX. + The options ::CU_JIT_WALL_TIME, + ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES + will accumulate data until the CUlinkState is destroyed. + + \p optionValues must remain valid for the life of the CUlinkState if output + options are used. No other references to inputs are maintained after this + call returns. 
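+
+ The usual flow is create, add one or more inputs, complete, load the resulting
+ cubin, then destroy the link state. A rough sketch with no options (cu:: is a
+ hypothetical path to these declarations and ptx is assumed to be a
+ NUL-terminated byte slice holding PTX text):
+
+ \code
+ unsafe {
+     let mut state: cuda_types::CUlinkState = std::ptr::null_mut();
+     cu::cuLinkCreate_v2(0, std::ptr::null_mut(), std::ptr::null_mut(), &mut state);
+     cu::cuLinkAddData_v2(
+         state,
+         cuda_types::CUjitInputType::CU_JIT_INPUT_PTX,
+         ptx.as_ptr() as *mut ::core::ffi::c_void,
+         ptx.len(),
+         std::ptr::null(),                          // optional name for log messages
+         0, std::ptr::null_mut(), std::ptr::null_mut(),
+     );
+     let mut cubin: *mut ::core::ffi::c_void = std::ptr::null_mut();
+     let mut size: usize = 0;
+     cu::cuLinkComplete(state, &mut cubin, &mut size);
+     let mut module: cuda_types::CUmodule = std::ptr::null_mut();
+     cu::cuModuleLoadData(&mut module, cubin);      // load before destroying `state`
+     cu::cuLinkDestroy(state);
+ }
+ \endcode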
+ + \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + + \param numOptions Size of options arrays + \param options Array of linker and compiler options + \param optionValues Array of option values, each cast to void * + \param stateOut On success, this will contain a CUlinkState to specify + and complete this action + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + \notefnerr + + \sa ::cuLinkAddData, + ::cuLinkAddFile, + ::cuLinkComplete, + ::cuLinkDestroy*/ + fn cuLinkCreate_v2( + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + stateOut: *mut cuda_types::CUlinkState, + ) -> cuda_types::CUresult; + /** \brief Add an input to a pending linker invocation + + Ownership of \p data is retained by the caller. No reference is retained to any + inputs after this call returns. + + This method accepts only compiler options, which are used if the data must + be compiled from PTX, and does not accept any of + ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, + ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. + + \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + + \param state A pending linker action. + \param type The type of the input data. + \param data The input data. PTX must be NULL-terminated. + \param size The length of the input data. + \param name An optional name for this input in log messages. + \param numOptions Size of options. + \param options Options to be applied only for this input (overrides options from ::cuLinkCreate). + \param optionValues Array of option values, each cast to void *. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_IMAGE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU + + \sa ::cuLinkCreate, + ::cuLinkAddFile, + ::cuLinkComplete, + ::cuLinkDestroy*/ + fn cuLinkAddData_v2( + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + data: *mut ::core::ffi::c_void, + size: usize, + name: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Add a file input to a pending linker invocation + + No reference is retained to any inputs after this call returns. + + This method accepts only compiler options, which are used if the input + must be compiled from PTX, and does not accept any of + ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, + ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. + + This method is equivalent to invoking ::cuLinkAddData on the contents + of the file. 
+ + \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + + \param state A pending linker action + \param type The type of the input data + \param path Path to the input file + \param numOptions Size of options + \param options Options to be applied only for this input (overrides options from ::cuLinkCreate) + \param optionValues Array of option values, each cast to void * + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_FILE_NOT_FOUND + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_IMAGE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU + + \sa ::cuLinkCreate, + ::cuLinkAddData, + ::cuLinkComplete, + ::cuLinkDestroy*/ + fn cuLinkAddFile_v2( + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + path: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Complete a pending linker invocation + + Completes the pending linker action and returns the cubin image for the linked + device code, which can be used with ::cuModuleLoadData. The cubin is owned by + \p state, so it should be loaded before \p state is destroyed via ::cuLinkDestroy. + This call does not destroy \p state. + + \param state A pending linker invocation + \param cubinOut On success, this will point to the output image + \param sizeOut Optional parameter to receive the size of the generated image + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuLinkCreate, + ::cuLinkAddData, + ::cuLinkAddFile, + ::cuLinkDestroy, + ::cuModuleLoadData*/ + fn cuLinkComplete( + state: cuda_types::CUlinkState, + cubinOut: *mut *mut ::core::ffi::c_void, + sizeOut: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Destroys state for a JIT linker invocation. + + \param state State object for the linker invocation + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE + + \sa ::cuLinkCreate*/ + fn cuLinkDestroy(state: cuda_types::CUlinkState) -> cuda_types::CUresult; + /** \brief Returns a handle to a texture reference + + \deprecated + + Returns in \p *pTexRef the handle of the texture reference of name \p name + in the module \p hmod. If no texture reference of that name exists, + ::cuModuleGetTexRef() returns ::CUDA_ERROR_NOT_FOUND. This texture reference + handle should not be destroyed, since it will be destroyed when the module + is unloaded. + + \param pTexRef - Returned texture reference + \param hmod - Module to retrieve texture reference from + \param name - Name of texture reference to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_FOUND + \notefnerr + + \sa + ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetSurfRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary, + ::cuModuleUnload*/ + fn cuModuleGetTexRef( + pTexRef: *mut cuda_types::CUtexref, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns a handle to a surface reference + + \deprecated + + Returns in \p *pSurfRef the handle of the surface reference of name \p name + in the module \p hmod. 
If no surface reference of that name exists, + ::cuModuleGetSurfRef() returns ::CUDA_ERROR_NOT_FOUND. + + \param pSurfRef - Returned surface reference + \param hmod - Module to retrieve surface reference from + \param name - Name of surface reference to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_FOUND + \notefnerr + + \sa + ::cuModuleGetFunction, + ::cuModuleGetGlobal, + ::cuModuleGetTexRef, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx, + ::cuModuleLoadFatBinary, + ::cuModuleUnload*/ + fn cuModuleGetSurfRef( + pSurfRef: *mut cuda_types::CUsurfref, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Load a library with specified code and options + + Takes a pointer \p code and loads the corresponding library \p library based on + the application defined library loading mode: + - If module loading is set to EAGER, via the environment variables described in "Module loading", + \p library is loaded eagerly into all contexts at the time of the call and future contexts + at the time of creation until the library is unloaded with ::cuLibraryUnload(). + - If the environment variables are set to LAZY, \p library + is not immediately loaded onto all existent contexts and will only be + loaded when a function is needed for that context, such as a kernel launch. + + These environment variables are described in the CUDA programming guide under the + "CUDA environment variables" section. + + The \p code may be a \e cubin or \e fatbin as output by \b nvcc, + or a NULL-terminated \e PTX, either as output by \b nvcc or hand-written. + + Options are passed as an array via \p jitOptions and any corresponding parameters are passed in + \p jitOptionsValues. The number of total JIT options is supplied via \p numJitOptions. + Any outputs will be returned via \p jitOptionsValues. + + Library load options are passed as an array via \p libraryOptions and any corresponding parameters are passed in + \p libraryOptionValues. The number of total library load options is supplied via \p numLibraryOptions. 
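+
+ A sketch of loading a library from an in-memory image and resolving a kernel
+ from it via ::cuLibraryGetKernel and ::cuKernelGetFunction (documented below).
+ Here cu:: is a hypothetical path to these declarations, code is assumed to be a
+ NUL-terminated PTX or fatbin image, "add" is a placeholder kernel name, and no
+ JIT or library options are passed:
+
+ \code
+ unsafe {
+     let mut library: cuda_types::CUlibrary = std::ptr::null_mut();
+     cu::cuLibraryLoadData(
+         &mut library,
+         code.as_ptr() as *const ::core::ffi::c_void,
+         std::ptr::null_mut(), std::ptr::null_mut(), 0,   // JIT options
+         std::ptr::null_mut(), std::ptr::null_mut(), 0,   // library options
+     );
+     let name = std::ffi::CString::new("add").unwrap();
+     let mut kernel: cuda_types::CUkernel = std::ptr::null_mut();
+     cu::cuLibraryGetKernel(&mut kernel, library, name.as_ptr());
+     let mut func: cuda_types::CUfunction = std::ptr::null_mut();
+     cu::cuKernelGetFunction(&mut func, kernel);
+ }
+ \endcode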
+ + \param library - Returned library + \param code - Code to load + \param jitOptions - Options for JIT + \param jitOptionsValues - Option values for JIT + \param numJitOptions - Number of options + \param libraryOptions - Options for loading + \param libraryOptionValues - Option values for loading + \param numLibraryOptions - Number of options for loading + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU, + ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + + \sa ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx*/ + fn cuLibraryLoadData( + library: *mut cuda_types::CUlibrary, + code: *const ::core::ffi::c_void, + jitOptions: *mut cuda_types::CUjit_option, + jitOptionsValues: *mut *mut ::core::ffi::c_void, + numJitOptions: ::core::ffi::c_uint, + libraryOptions: *mut cuda_types::CUlibraryOption, + libraryOptionValues: *mut *mut ::core::ffi::c_void, + numLibraryOptions: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Load a library with specified file and options + + Takes a pointer \p code and loads the corresponding library \p library based on + the application defined library loading mode: + - If module loading is set to EAGER, via the environment variables described in "Module loading", + \p library is loaded eagerly into all contexts at the time of the call and future contexts + at the time of creation until the library is unloaded with ::cuLibraryUnload(). + - If the environment variables are set to LAZY, \p library + is not immediately loaded onto all existent contexts and will only be + loaded when a function is needed for that context, such as a kernel launch. + + These environment variables are described in the CUDA programming guide under the + "CUDA environment variables" section. + + The file should be a \e cubin file as output by \b nvcc, or a \e PTX file either + as output by \b nvcc or handwritten, or a \e fatbin file as output by \b nvcc. + + Options are passed as an array via \p jitOptions and any corresponding parameters are + passed in \p jitOptionsValues. The number of total options is supplied via \p numJitOptions. + Any outputs will be returned via \p jitOptionsValues. + + Library load options are passed as an array via \p libraryOptions and any corresponding parameters are passed in + \p libraryOptionValues. The number of total library load options is supplied via \p numLibraryOptions. 
+ + \param library - Returned library + \param fileName - File to load from + \param jitOptions - Options for JIT + \param jitOptionsValues - Option values for JIT + \param numJitOptions - Number of options + \param libraryOptions - Options for loading + \param libraryOptionValues - Option values for loading + \param numLibraryOptions - Number of options for loading + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_PTX, + ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NO_BINARY_FOR_GPU, + ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + + \sa ::cuLibraryLoadData, + ::cuLibraryUnload, + ::cuModuleLoad, + ::cuModuleLoadData, + ::cuModuleLoadDataEx*/ + fn cuLibraryLoadFromFile( + library: *mut cuda_types::CUlibrary, + fileName: *const ::core::ffi::c_char, + jitOptions: *mut cuda_types::CUjit_option, + jitOptionsValues: *mut *mut ::core::ffi::c_void, + numJitOptions: ::core::ffi::c_uint, + libraryOptions: *mut cuda_types::CUlibraryOption, + libraryOptionValues: *mut *mut ::core::ffi::c_void, + numLibraryOptions: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Unloads a library + + Unloads the library specified with \p library + + \param library - Library to unload + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuModuleUnload*/ + fn cuLibraryUnload(library: cuda_types::CUlibrary) -> cuda_types::CUresult; + /** \brief Returns a kernel handle + + Returns in \p pKernel the handle of the kernel with name \p name located in library \p library. + If kernel handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + + \param pKernel - Returned kernel handle + \param library - Library to retrieve kernel from + \param name - Name of kernel to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_FOUND + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuKernelGetFunction, + ::cuLibraryGetModule, + ::cuModuleGetFunction*/ + fn cuLibraryGetKernel( + pKernel: *mut cuda_types::CUkernel, + library: cuda_types::CUlibrary, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns the number of kernels within a library + + Returns in \p count the number of kernels in \p lib. + + \param count - Number of kernels found within the library + \param lib - Library to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE*/ + fn cuLibraryGetKernelCount( + count: *mut ::core::ffi::c_uint, + lib: cuda_types::CUlibrary, + ) -> cuda_types::CUresult; + /** \brief Retrieve the kernel handles within a library. + + Returns in \p kernels a maximum number of \p numKernels kernel handles within \p lib. + The returned kernel handle becomes invalid when the library is unloaded. 
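+
+ Together with ::cuLibraryGetKernelCount above, this allows sizing the buffer
+ before enumerating; a rough sketch (cu:: is a hypothetical path to these
+ declarations and library is an already-loaded ::CUlibrary):
+
+ \code
+ unsafe {
+     let mut count: ::core::ffi::c_uint = 0;
+     cu::cuLibraryGetKernelCount(&mut count, library);
+     let mut kernels: Vec<cuda_types::CUkernel> =
+         vec![std::ptr::null_mut(); count as usize];
+     cu::cuLibraryEnumerateKernels(kernels.as_mut_ptr(), count, library);
+     // These handles become invalid once `library` is unloaded.
+ }
+ \endcode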
+ + \param kernels - Buffer where the kernel handles are returned to + \param numKernels - Maximum number of kernel handles may be returned to the buffer + \param lib - Library to query from + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuLibraryGetKernelCount*/ + fn cuLibraryEnumerateKernels( + kernels: *mut cuda_types::CUkernel, + numKernels: ::core::ffi::c_uint, + lib: cuda_types::CUlibrary, + ) -> cuda_types::CUresult; + /** \brief Returns a module handle + + Returns in \p pMod the module handle associated with the current context located in + library \p library. If module handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + + \param pMod - Returned module handle + \param library - Library to retrieve module from + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_FOUND, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuModuleGetFunction*/ + fn cuLibraryGetModule( + pMod: *mut cuda_types::CUmodule, + library: cuda_types::CUlibrary, + ) -> cuda_types::CUresult; + /** \brief Returns a function handle + + Returns in \p pFunc the handle of the function for the requested kernel \p kernel and + the current context. If function handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + + \param pFunc - Returned function handle + \param kernel - Kernel to retrieve function for the requested context + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_FOUND, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuLibraryGetKernel, + ::cuLibraryGetModule, + ::cuModuleGetFunction*/ + fn cuKernelGetFunction( + pFunc: *mut cuda_types::CUfunction, + kernel: cuda_types::CUkernel, + ) -> cuda_types::CUresult; + /** \brief Returns a global device pointer + + Returns in \p *dptr and \p *bytes the base pointer and size of the global with + name \p name for the requested library \p library and the current context. + If no global for the requested name \p name exists, the call returns ::CUDA_ERROR_NOT_FOUND. + One of the parameters \p dptr or \p bytes (not both) can be NULL in which + case it is ignored. + + \param dptr - Returned global device pointer for the requested context + \param bytes - Returned global size in bytes + \param library - Library to retrieve global from + \param name - Name of global to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_FOUND, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuLibraryGetModule, + cuModuleGetGlobal*/ + fn cuLibraryGetGlobal( + dptr: *mut cuda_types::CUdeviceptr, + bytes: *mut usize, + library: cuda_types::CUlibrary, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns a pointer to managed memory + + Returns in \p *dptr and \p *bytes the base pointer and size of the managed memory with + name \p name for the requested library \p library. 
If no managed memory with the + requested name \p name exists, the call returns ::CUDA_ERROR_NOT_FOUND. One of the parameters + \p dptr or \p bytes (not both) can be NULL in which case it is ignored. + Note that managed memory for library \p library is shared across devices and is registered + when the library is loaded into atleast one context. + + \note The API requires a CUDA context to be present and initialized on at least one device. + If no context is present, the call returns ::CUDA_ERROR_NOT_FOUND. + + \param dptr - Returned pointer to the managed memory + \param bytes - Returned memory size in bytes + \param library - Library to retrieve managed memory from + \param name - Name of managed memory to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_FOUND + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload*/ + fn cuLibraryGetManaged( + dptr: *mut cuda_types::CUdeviceptr, + bytes: *mut usize, + library: cuda_types::CUlibrary, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns a pointer to a unified function + + Returns in \p *fptr the function pointer to a unified function denoted by \p symbol. + If no unified function with name \p symbol exists, the call returns ::CUDA_ERROR_NOT_FOUND. + If there is no device with attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS present in the system, + the call may return ::CUDA_ERROR_NOT_FOUND. + + \param fptr - Returned pointer to a unified function + \param library - Library to retrieve function pointer memory from + \param symbol - Name of function pointer to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_FOUND + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload*/ + fn cuLibraryGetUnifiedFunction( + fptr: *mut *mut ::core::ffi::c_void, + library: cuda_types::CUlibrary, + symbol: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns information about a kernel + + Returns in \p *pi the integer value of the attribute \p attrib for the kernel + \p kernel for the requested device \p dev. The supported attributes are: + - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads + per block, beyond which a launch of the kernel would fail. This number + depends on both the kernel and the requested device. + - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of + statically-allocated shared memory per block required by this kernel. + This does not include dynamically-allocated shared memory requested by + the user at runtime. + - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated + constant memory required by this kernel. + - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory + used by each thread of this kernel. + - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread + of this kernel. + - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for + which the kernel was compiled. This value is the major PTX version * 10 + + the minor PTX version, so a PTX version 1.3 function would return the + value 13. Note that this may return the undefined value of 0 for cubins + compiled prior to CUDA 3.0. 
+ - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for + which the kernel was compiled. This value is the major binary + version * 10 + the minor binary version, so a binary version 1.3 function + would return the value 13. Note that this will return a value of 10 for + legacy cubins that do not have a properly-encoded binary architecture + version. + - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the kernel has + been compiled with user specified option "-Xptxas --dlcm=ca" set. + - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of + dynamically-allocated shared memory. + - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 + cache split ratio in percent of total shared memory. + - ::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the + kernel must launch with a valid cluster size specified. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + blocks. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + blocks. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + blocks. + - ::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether + the function can be launched with non-portable cluster size. 1 is allowed, + 0 is disallowed. A non-portable cluster size may only function on the + specific SKUs the program is tested on. The launch might fail if the + program is run on a different hardware platform. CUDA API provides + cudaOccupancyMaxActiveClusters to assist with checking whether the desired + size can be launched on the current device. A portable cluster size is + guaranteed to be functional on all compute capabilities higher than the + target compute capability. The portable cluster size for sm_90 is 8 blocks + per cluster. This value may increase for future compute capabilities. The + specific hardware unit may support higher cluster sizes that’s not + guaranteed to be portable. + - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + + \note If another thread is trying to set the same attribute on the same device using + ::cuKernelSetAttribute() simultaneously, the attribute query will give the old or new + value depending on the interleavings chosen by the OS scheduler and memory consistency. + + \param pi - Returned attribute value + \param attrib - Attribute requested + \param kernel - Kernel to query attribute of + \param dev - Device to query attribute of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuKernelSetAttribute, + ::cuLibraryGetKernel, + ::cuLaunchKernel, + ::cuKernelGetFunction, + ::cuLibraryGetModule, + ::cuModuleGetFunction, + ::cuFuncGetAttribute*/ + fn cuKernelGetAttribute( + pi: *mut ::core::ffi::c_int, + attrib: cuda_types::CUfunction_attribute, + kernel: cuda_types::CUkernel, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Sets information about a kernel + + This call sets the value of a specified attribute \p attrib on the kernel \p kernel + for the requested device \p dev to an integer value specified by \p val. + This function returns CUDA_SUCCESS if the new value of the attribute could be + successfully set. 
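For illustration, querying one of the attributes listed above through cuKernelGetAttribute could look like the following. The CUfunction_attribute constant path assumes bindgen's newtype-enum style and is not taken from this patch; the helper name is hypothetical:

    unsafe fn max_threads_per_block(
        kernel: cuda_types::CUkernel,
        dev: cuda_types::CUdevice,
    ) -> ::core::ffi::c_int {
        let mut value: ::core::ffi::c_int = 0;
        // Error handling elided; an invalid handle would surface as CUDA_ERROR_INVALID_HANDLE.
        cuKernelGetAttribute(
            &mut value,
            cuda_types::CUfunction_attribute::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
            kernel,
            dev,
        );
        value
    }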
If the set fails, this call will return an error. + Not all attributes can have values set. Attempting to set a value on a read-only + attribute will result in an error (CUDA_ERROR_INVALID_VALUE) + + Note that attributes set using ::cuFuncSetAttribute() will override the attribute + set by this API irrespective of whether the call to ::cuFuncSetAttribute() is made + before or after this API call. However, ::cuKernelGetAttribute() will always + return the attribute value set by this API. + + Supported attributes are: + - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This is the maximum size in bytes of + dynamically-allocated shared memory. The value should contain the requested + maximum size of dynamically-allocated shared memory. The sum of this value and + the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the + device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. + The maximal size of requestable dynamic shared memory may differ by GPU + architecture. + - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 + cache and shared memory use the same hardware resources, this sets the shared memory + carveout preference, in percent of the total shared memory. + See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + This is only a hint, and the driver can choose a different ratio if required to execute the function. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + blocks. The width, height, and depth values must either all be 0 or all be + positive. The validity of the cluster dimensions is checked at launch time. + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + blocks. The width, height, and depth values must either all be 0 or all be + positive. The validity of the cluster dimensions is checked at launch time. + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + blocks. The width, height, and depth values must either all be 0 or all be + positive. The validity of the cluster dimensions is checked at launch time. + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + + \note The API has stricter locking requirements in comparison to its legacy counterpart + ::cuFuncSetAttribute() due to device-wide semantics. If multiple threads are trying to + set the same attribute on the same device simultaneously, the attribute setting will depend + on the interleavings chosen by the OS scheduler and memory consistency. 
+ + \param attrib - Attribute requested + \param val - Value to set + \param kernel - Kernel to set attribute of + \param dev - Device to set attribute of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuKernelGetAttribute, + ::cuLibraryGetKernel, + ::cuLaunchKernel, + ::cuKernelGetFunction, + ::cuLibraryGetModule, + ::cuModuleGetFunction, + ::cuFuncSetAttribute*/ + fn cuKernelSetAttribute( + attrib: cuda_types::CUfunction_attribute, + val: ::core::ffi::c_int, + kernel: cuda_types::CUkernel, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Sets the preferred cache configuration for a device kernel. + + On devices where the L1 cache and shared memory use the same hardware + resources, this sets through \p config the preferred cache configuration for + the device kernel \p kernel on the requested device \p dev. This is only a preference. + The driver will use the requested configuration if possible, but it is free to choose a different + configuration if required to execute \p kernel. Any context-wide preference + set via ::cuCtxSetCacheConfig() will be overridden by this per-kernel + setting. + + Note that attributes set using ::cuFuncSetCacheConfig() will override the attribute + set by this API irrespective of whether the call to ::cuFuncSetCacheConfig() is made + before or after this API call. + + This setting does nothing on devices where the size of the L1 cache and + shared memory are fixed. + + Launching a kernel with a different preference than the most recent + preference setting may insert a device-side synchronization point. + + + The supported cache configurations are: + - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + + \note The API has stricter locking requirements in comparison to its legacy counterpart + ::cuFuncSetCacheConfig() due to device-wide semantics. If multiple threads are trying to + set a config on the same device simultaneously, the cache config setting will depend + on the interleavings chosen by the OS scheduler and memory consistency. + + \param kernel - Kernel to configure cache for + \param config - Requested cache configuration + \param dev - Device to set attribute of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuLibraryLoadData, + ::cuLibraryLoadFromFile, + ::cuLibraryUnload, + ::cuLibraryGetKernel, + ::cuKernelGetFunction, + ::cuLibraryGetModule, + ::cuModuleGetFunction, + ::cuFuncSetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuLaunchKernel*/ + fn cuKernelSetCacheConfig( + kernel: cuda_types::CUkernel, + config: cuda_types::CUfunc_cache, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns the function name for a ::CUkernel handle + + Returns in \p **name the function name associated with the kernel handle \p hfunc . + The function name is returned as a null-terminated string. 
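The two per-kernel setters above are often used together when a kernel needs a large dynamic shared-memory window. A rough, assumption-laden sketch (constant paths again presume bindgen newtype enums; return values are ignored for brevity):

    unsafe fn prefer_shared_memory(
        kernel: cuda_types::CUkernel,
        dev: cuda_types::CUdevice,
        dynamic_smem_bytes: ::core::ffi::c_int,
    ) {
        // Raise the per-kernel ceiling for dynamically allocated shared memory on this device.
        cuKernelSetAttribute(
            cuda_types::CUfunction_attribute::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
            dynamic_smem_bytes,
            kernel,
            dev,
        );
        // And hint that the L1/shared-memory split should favour shared memory.
        cuKernelSetCacheConfig(
            kernel,
            cuda_types::CUfunc_cache::CU_FUNC_CACHE_PREFER_SHARED,
            dev,
        );
    }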
The returned name is only + valid when the kernel handle is valid. If the library is unloaded or reloaded, one + must call the API again to get the updated name. This API may return a mangled name if + the function is not declared as having C linkage. If either \p **name or \p hfunc + is NULL, ::CUDA_ERROR_INVALID_VALUE is returned. + + \param name - The returned name of the function + \param hfunc - The function handle to retrieve the name for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr +*/ + fn cuKernelGetName( + name: *mut *const ::core::ffi::c_char, + hfunc: cuda_types::CUkernel, + ) -> cuda_types::CUresult; + /** \brief Returns the offset and size of a kernel parameter in the device-side parameter layout + + Queries the kernel parameter at \p paramIndex into \p kernel's list of parameters, and returns + in \p paramOffset and \p paramSize the offset and size, respectively, where the parameter + will reside in the device-side parameter layout. This information can be used to update kernel + node parameters from the device via ::cudaGraphKernelNodeSetParam() and + ::cudaGraphKernelNodeUpdatesApply(). \p paramIndex must be less than the number of parameters + that \p kernel takes. \p paramSize can be set to NULL if only the parameter offset is desired. + + \param kernel - The kernel to query + \param paramIndex - The parameter index to query + \param paramOffset - Returns the offset into the device-side parameter layout at which the parameter resides + \param paramSize - Optionally returns the size of the parameter in the device-side parameter layout + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuFuncGetParamInfo*/ + fn cuKernelGetParamInfo( + kernel: cuda_types::CUkernel, + paramIndex: usize, + paramOffset: *mut usize, + paramSize: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Gets free and total memory + + Returns in \p *total the total amount of memory available to the the current context. + Returns in \p *free the amount of memory on the device that is free according to the OS. + CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free. + In a multi-tenet situation, free estimate returned is prone to race condition where + a new allocation/free done by a different process or a different thread in the same + process between the time when free memory was estimated and reported, will result in + deviation in free value reported and actual free memory. + + The integrated GPU on Tegra shares memory with CPU and other component + of the SoC. The free and total values returned by the API excludes + the SWAP memory space maintained by the OS on some platforms. + The OS may move some of the memory pages into swap area as the GPU or + CPU allocate or access memory. See Tegra app note on how to calculate + total and free memory on Tegra. 
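Because the pointer returned by cuKernelGetName only stays valid while the owning library is loaded, a caller would normally copy the name out immediately. A small sketch under the same assumptions as the earlier examples; the null initialization and the helper name are mine, not part of the bindings:

    unsafe fn kernel_name(kernel: cuda_types::CUkernel) -> Option<String> {
        let mut name: *const ::core::ffi::c_char = std::ptr::null();
        cuKernelGetName(&mut name, kernel);
        if name.is_null() {
            return None;
        }
        // Copy the (possibly mangled) name into owned storage before the library can go away.
        Some(std::ffi::CStr::from_ptr(name).to_string_lossy().into_owned())
    }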
+ + \param free - Returned free memory in bytes + \param total - Returned total memory in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemGetInfo*/ + fn cuMemGetInfo_v2(free: *mut usize, total: *mut usize) -> cuda_types::CUresult; + /** \brief Allocates device memory + + Allocates \p bytesize bytes of linear memory on the device and returns in + \p *dptr a pointer to the allocated memory. The allocated memory is suitably + aligned for any kind of variable. The memory is not cleared. If \p bytesize + is 0, ::cuMemAlloc() returns ::CUDA_ERROR_INVALID_VALUE. + + \param dptr - Returned device pointer + \param bytesize - Requested allocation size in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMalloc*/ + fn cuMemAlloc_v2( + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + ) -> cuda_types::CUresult; + /** \brief Allocates pitched device memory + + Allocates at least \p WidthInBytes * \p Height bytes of linear memory on + the device and returns in \p *dptr a pointer to the allocated memory. The + function may pad the allocation to ensure that corresponding pointers in + any given row will continue to meet the alignment requirements for + coalescing as the address is updated from row to row. \p ElementSizeBytes + specifies the size of the largest reads and writes that will be performed + on the memory range. \p ElementSizeBytes may be 4, 8 or 16 (since coalesced + memory transactions are not possible on other data sizes). If + \p ElementSizeBytes is smaller than the actual read/write size of a kernel, + the kernel will run correctly, but possibly at reduced speed. The pitch + returned in \p *pPitch by ::cuMemAllocPitch() is the width in bytes of the + allocation. The intended usage of pitch is as a separate parameter of the + allocation, used to compute addresses within the 2D array. 
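cuMemGetInfo is commonly used as a coarse pre-check before a large cuMemAlloc. Illustrative sketch only; CUdeviceptr is assumed to still be the bindgen newtype, which is why std::mem::zeroed is used instead of a literal constructor:

    unsafe fn alloc_if_it_fits(bytes: usize) -> Option<cuda_types::CUdeviceptr> {
        let (mut free, mut total) = (0usize, 0usize);
        cuMemGetInfo_v2(&mut free, &mut total);
        // The free figure is an estimate that can race with other threads and processes
        // (see the note above), so treat it as a hint rather than a guarantee.
        if bytes > free {
            return None;
        }
        let mut dptr: cuda_types::CUdeviceptr = std::mem::zeroed();
        // Error handling elided; a zero-byte request would fail with CUDA_ERROR_INVALID_VALUE.
        cuMemAlloc_v2(&mut dptr, bytes);
        Some(dptr)
    }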
Given the row + and column of an array element of type \b T, the address is computed as: + \code +T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column; + \endcode + + The pitch returned by ::cuMemAllocPitch() is guaranteed to work with + ::cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is + recommended that programmers consider performing pitch allocations using + ::cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is + especially true if the application will be performing 2D memory copies + between different regions of device memory (whether linear memory or CUDA + arrays). + + The byte alignment of the pitch returned by ::cuMemAllocPitch() is guaranteed + to match or exceed the alignment requirement for texture binding with + ::cuTexRefSetAddress2D(). + + \param dptr - Returned device pointer + \param pPitch - Returned pitch of allocation in bytes + \param WidthInBytes - Requested allocation width in bytes + \param Height - Requested allocation height in rows + \param ElementSizeBytes - Size of largest reads/writes for range + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMallocPitch*/ + fn cuMemAllocPitch_v2( + dptr: *mut cuda_types::CUdeviceptr, + pPitch: *mut usize, + WidthInBytes: usize, + Height: usize, + ElementSizeBytes: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Frees device memory + + Frees the memory space pointed to by \p dptr, which must have been returned + by a previous call to one of the following memory allocation APIs - ::cuMemAlloc(), + ::cuMemAllocPitch(), ::cuMemAllocManaged(), ::cuMemAllocAsync(), ::cuMemAllocFromPoolAsync() + + Note - This API will not perform any implict synchronization when the pointer was allocated with + ::cuMemAllocAsync or ::cuMemAllocFromPoolAsync. Callers must ensure that all accesses to the + pointer have completed before invoking ::cuMemFree. For best performance and memory reuse, users + should use ::cuMemFreeAsync to free memory allocated via the stream ordered memory allocator. 
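The pitch contract above translates directly into address arithmetic. A minimal sketch, assuming the caller later copies with cuMemcpy2D and frees with cuMemFree; both helper names are invented for the example:

    unsafe fn alloc_pitched(
        width_bytes: usize,
        height: usize,
    ) -> (cuda_types::CUdeviceptr, usize) {
        let mut base: cuda_types::CUdeviceptr = std::mem::zeroed();
        let mut pitch = 0usize;
        // Element size 16 covers the widest loads the documentation allows (4, 8 or 16 bytes).
        cuMemAllocPitch_v2(&mut base, &mut pitch, width_bytes, height, 16);
        (base, pitch)
    }

    // Byte offset of element (row, col), mirroring the C snippet in the comment above:
    // address = base + row * pitch + col * size_of::<T>()
    fn element_offset(row: usize, col: usize, pitch: usize, elem_size: usize) -> usize {
        row * pitch + col * elem_size
    }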
+ + \param dptr - Pointer to memory to free + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemAllocManaged, ::cuMemAllocAsync, ::cuMemAllocFromPoolAsync, + ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, ::cuMemcpy3D, ::cuMemcpy3DAsync, + ::cuMemcpyAtoA, ::cuMemcpyAtoD, ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, + ::cuMemcpyHtoAAsync, ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, ::cuMemFreeAsync, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaFree*/ + fn cuMemFree_v2(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUresult; + /** \brief Get information on memory allocations + + Returns the base address in \p *pbase and size in \p *psize of the + allocation by ::cuMemAlloc() or ::cuMemAllocPitch() that contains the input + pointer \p dptr. Both parameters \p pbase and \p psize are optional. If one + of them is NULL, it is ignored. + + \param pbase - Returned base address + \param psize - Returned size of device memory allocation + \param dptr - Device pointer to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_NOT_FOUND, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32*/ + fn cuMemGetAddressRange_v2( + pbase: *mut cuda_types::CUdeviceptr, + psize: *mut usize, + dptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Allocates page-locked host memory + + Allocates \p bytesize bytes of host memory that is page-locked and + accessible to the device. The driver tracks the virtual memory ranges + allocated with this function and automatically accelerates calls to + functions such as ::cuMemcpy(). Since the memory can be accessed directly by + the device, it can be read or written with much higher bandwidth than + pageable memory obtained with functions such as ::malloc(). + + On systems where ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES + is true, ::cuMemAllocHost may not page-lock the allocated memory. + + Page-locking excessive amounts of memory with ::cuMemAllocHost() may degrade system + performance, since it reduces the amount of memory available to the system + for paging. As a result, this function is best used sparingly to allocate + staging areas for data exchange between host and device. 
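cuMemGetAddressRange is the inverse lookup: given any pointer inside an allocation, it recovers the base and size. A sketch under the same assumptions as above, requesting both optional out-parameters:

    unsafe fn containing_allocation(
        dptr: cuda_types::CUdeviceptr,
    ) -> (cuda_types::CUdeviceptr, usize) {
        let mut base: cuda_types::CUdeviceptr = std::mem::zeroed();
        let mut size = 0usize;
        // Either out-parameter could instead be NULL if only one of the two is needed.
        cuMemGetAddressRange_v2(&mut base, &mut size, dptr);
        (base, size)
    }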
+ + Note all host memory allocated using ::cuMemAllocHost() will automatically + be immediately accessible to all contexts on all devices which support unified + addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). + The device pointer that may be used to access this host memory from those + contexts is always equal to the returned host pointer \p *pp. + See \ref CUDA_UNIFIED for additional details. + + \param pp - Returned pointer to host memory + \param bytesize - Requested allocation size in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMallocHost*/ + fn cuMemAllocHost_v2( + pp: *mut *mut ::core::ffi::c_void, + bytesize: usize, + ) -> cuda_types::CUresult; + /** \brief Frees page-locked host memory + + Frees the memory space pointed to by \p p, which must have been returned by + a previous call to ::cuMemAllocHost(). + + \param p - Pointer to memory to free + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaFreeHost*/ + fn cuMemFreeHost(p: *mut ::core::ffi::c_void) -> cuda_types::CUresult; + /** \brief Allocates page-locked host memory + + Allocates \p bytesize bytes of host memory that is page-locked and accessible + to the device. The driver tracks the virtual memory ranges allocated with + this function and automatically accelerates calls to functions such as + ::cuMemcpyHtoD(). Since the memory can be accessed directly by the device, + it can be read or written with much higher bandwidth than pageable memory + obtained with functions such as ::malloc(). + + On systems where ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES + is true, ::cuMemHostAlloc may not page-lock the allocated memory. + + Page-locking excessive amounts of memory may degrade system performance, + since it reduces the amount of memory available to the system for paging. 
+ As a result, this function is best used sparingly to allocate staging areas + for data exchange between host and device. + + The \p Flags parameter enables different options to be specified that + affect the allocation, as follows. + + - ::CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be + considered as pinned memory by all CUDA contexts, not just the one that + performed the allocation. + + - ::CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address + space. The device pointer to the memory may be obtained by calling + ::cuMemHostGetDevicePointer(). + + - ::CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined + (WC). WC memory can be transferred across the PCI Express bus more + quickly on some system configurations, but cannot be read efficiently by + most CPUs. WC memory is a good option for buffers that will be written by + the CPU and read by the GPU via mapped pinned memory or host->device + transfers. + + All of these flags are orthogonal to one another: a developer may allocate + memory that is portable, mapped and/or write-combined with no restrictions. + + The ::CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for + devices that do not support mapped pinned memory. The failure is deferred + to ::cuMemHostGetDevicePointer() because the memory may be mapped into + other CUDA contexts via the ::CU_MEMHOSTALLOC_PORTABLE flag. + + The memory allocated by this function must be freed with ::cuMemFreeHost(). + + Note all host memory allocated using ::cuMemHostAlloc() will automatically + be immediately accessible to all contexts on all devices which support unified + addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). + Unless the flag ::CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer + that may be used to access this host memory from those contexts is always equal + to the returned host pointer \p *pp. If the flag ::CU_MEMHOSTALLOC_WRITECOMBINED + is specified, then the function ::cuMemHostGetDevicePointer() must be used + to query the device pointer, even if the context supports unified addressing. + See \ref CUDA_UNIFIED for additional details. 
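As an illustration of the DEVICEMAP path described above: allocate mapped pinned memory, then ask for the matching device pointer. The flag is written as a literal because it is not certain where cuda_types re-exports the CU_MEMHOSTALLOC_* constants; 2 is the documented value of CU_MEMHOSTALLOC_DEVICEMAP, and the helper name is hypothetical:

    unsafe fn mapped_pinned(bytes: usize) -> (*mut ::core::ffi::c_void, cuda_types::CUdeviceptr) {
        let mut host: *mut ::core::ffi::c_void = std::ptr::null_mut();
        // CU_MEMHOSTALLOC_DEVICEMAP: also map the allocation into the CUDA address space.
        cuMemHostAlloc(&mut host, bytes, 2);
        let mut dev: cuda_types::CUdeviceptr = std::mem::zeroed();
        // Flags must be 0 per the documentation above.
        cuMemHostGetDevicePointer_v2(&mut dev, host, 0);
        (host, dev)
    }

On unified-addressing systems without CU_MEMHOSTALLOC_WRITECOMBINED the two pointers compare equal, but routing through cuMemHostGetDevicePointer keeps the code correct on the remaining configurations, as the doc comment notes.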
+ + \param pp - Returned pointer to host memory + \param bytesize - Requested allocation size in bytes + \param Flags - Flags for allocation request + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaHostAlloc*/ + fn cuMemHostAlloc( + pp: *mut *mut ::core::ffi::c_void, + bytesize: usize, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Passes back device pointer of mapped pinned memory + + Passes back the device pointer \p pdptr corresponding to the mapped, pinned + host buffer \p p allocated by ::cuMemHostAlloc. + + ::cuMemHostGetDevicePointer() will fail if the ::CU_MEMHOSTALLOC_DEVICEMAP + flag was not specified at the time the memory was allocated, or if the + function is called on a GPU that does not support mapped pinned memory. + + For devices that have a non-zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory + can also be accessed from the device using the host pointer \p p. + The device pointer returned by ::cuMemHostGetDevicePointer() may or may not + match the original host pointer \p p and depends on the devices visible to the + application. If all devices visible to the application have a non-zero value for the + device attribute, the device pointer returned by ::cuMemHostGetDevicePointer() + will match the original pointer \p p. If any device visible to the application + has a zero value for the device attribute, the device pointer returned by + ::cuMemHostGetDevicePointer() will not match the original host pointer \p p, + but it will be suitable for use on all devices provided Unified Virtual Addressing + is enabled. In such systems, it is valid to access the memory using either pointer + on devices that have a non-zero value for the device attribute. Note however that + such devices should access the memory using only one of the two pointers and not both. + + \p Flags provides for future releases. For now, it must be set to 0. 
+ + \param pdptr - Returned device pointer + \param p - Host pointer + \param Flags - Options (must be 0) + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaHostGetDevicePointer*/ + fn cuMemHostGetDevicePointer_v2( + pdptr: *mut cuda_types::CUdeviceptr, + p: *mut ::core::ffi::c_void, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Passes back flags that were used for a pinned allocation + + Passes back the flags \p pFlags that were specified when allocating + the pinned host buffer \p p allocated by ::cuMemHostAlloc. + + ::cuMemHostGetFlags() will fail if the pointer does not reside in + an allocation performed by ::cuMemAllocHost() or ::cuMemHostAlloc(). + + \param pFlags - Returned flags word + \param p - Host pointer + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuMemAllocHost, + ::cuMemHostAlloc, + ::cudaHostGetFlags*/ + fn cuMemHostGetFlags( + pFlags: *mut ::core::ffi::c_uint, + p: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Allocates memory that will be automatically managed by the Unified Memory system + + Allocates \p bytesize bytes of managed memory on the device and returns in + \p *dptr a pointer to the allocated memory. If the device doesn't support + allocating managed memory, ::CUDA_ERROR_NOT_SUPPORTED is returned. Support + for managed memory can be queried using the device attribute + ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably + aligned for any kind of variable. The memory is not cleared. If \p bytesize + is 0, ::cuMemAllocManaged returns ::CUDA_ERROR_INVALID_VALUE. The pointer + is valid on the CPU and on all GPUs in the system that support managed memory. + All accesses to this pointer must obey the Unified Memory programming model. + + \p flags specifies the default stream association for this allocation. + \p flags must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST. If + ::CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from + any stream on any device. If ::CU_MEM_ATTACH_HOST is specified, then the + allocation should not be accessed from devices that have a zero value for the + device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to + ::cuStreamAttachMemAsync will be required to enable access on such devices. + + If the association is later changed via ::cuStreamAttachMemAsync to + a single stream, the default association as specified during ::cuMemAllocManaged + is restored when that stream is destroyed. For __managed__ variables, the + default association is always ::CU_MEM_ATTACH_GLOBAL. 
Note that destroying a + stream is an asynchronous operation, and as a result, the change to default + association won't happen until all work in the stream has completed. + + Memory allocated with ::cuMemAllocManaged should be released with ::cuMemFree. + + Device memory oversubscription is possible for GPUs that have a non-zero value for the + device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on + such GPUs may be evicted from device memory to host memory at any time by the Unified + Memory driver in order to make room for other allocations. + + In a system where all GPUs have a non-zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this + API returns and instead may be populated on access. In such systems, managed memory can + migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to + maintain data locality and prevent excessive page faults to the extent possible. The application + can also guide the driver about memory usage patterns via ::cuMemAdvise. The application + can also explicitly migrate memory to a desired processor's memory via + ::cuMemPrefetchAsync. + + In a multi-GPU system where all of the GPUs have a zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support + with each other, the physical storage for managed memory is created on the GPU which is active + at the time ::cuMemAllocManaged is called. All other GPUs will reference the data at reduced + bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate + memory among such GPUs. + + In a multi-GPU system where not all GPUs have peer-to-peer support with each other and + where the value of the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + is zero for at least one of those GPUs, the location chosen for physical storage of managed + memory is system-dependent. + - On Linux, the location chosen will be device memory as long as the current set of active + contexts are on devices that either have peer-to-peer support with each other or have a + non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + If there is an active context on a GPU that does not have a non-zero value for that device + attribute and it does not have peer-to-peer support with the other devices that have active + contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + Note that this means that managed memory that is located in device memory is migrated to + host memory if a new context is created on a GPU that doesn't have a non-zero value for + the device attribute and does not support peer-to-peer with at least one of the other devices + that has an active context. This in turn implies that context creation may fail if there is + insufficient host memory to migrate all managed allocations. + - On Windows, the physical storage is always created in 'zero-copy' or host memory. + All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + restrict CUDA to only use those GPUs that have peer-to-peer support. + Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a + non-zero value to force the driver to always use device memory for physical storage. 
+ When this environment variable is set to a non-zero value, all contexts created in + that process on devices that support managed memory have to be peer-to-peer compatible + with each other. Context creation will fail if a context is created on a device that + supports managed memory and is not peer-to-peer compatible with any of the other + managed memory supporting devices on which contexts were previously created, even if + those contexts have been destroyed. These environment variables are described + in the CUDA programming guide under the "CUDA environment variables" section. + - On ARM, managed memory is not available on discrete gpu with Drive PX-2. + + \param dptr - Returned device pointer + \param bytesize - Requested allocation size in bytes + \param flags - Must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cuDeviceGetAttribute, ::cuStreamAttachMemAsync, + ::cudaMallocManaged*/ + fn cuMemAllocManaged( + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Registers a callback function to receive async notifications + + Registers \p callbackFunc to receive async notifications. + + The \p userData parameter is passed to the callback function at async notification time. + Likewise, \p callback is also passed to the callback function to distinguish between + multiple registered callbacks. + + The callback function being registered should be designed to return quickly (~10ms). + Any long running tasks should be queued for execution on an application thread. + + Callbacks may not call cuDeviceRegisterAsyncNotification or cuDeviceUnregisterAsyncNotification. + Doing so will result in ::CUDA_ERROR_NOT_PERMITTED. Async notification callbacks execute + in an undefined order and may be serialized. + + Returns in \p *callback a handle representing the registered callback instance. + + \param device - The device on which to register the callback + \param callbackFunc - The function to register as a callback + \param userData - A generic pointer to user data. This is passed into the callback function. 
+ \param callback - A handle representing the registered callback instance + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_NOT_SUPPORTED + ::CUDA_ERROR_INVALID_DEVICE + ::CUDA_ERROR_INVALID_VALUE + ::CUDA_ERROR_NOT_PERMITTED + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cuDeviceUnregisterAsyncNotification*/ + fn cuDeviceRegisterAsyncNotification( + device: cuda_types::CUdevice, + callbackFunc: cuda_types::CUasyncCallback, + userData: *mut ::core::ffi::c_void, + callback: *mut cuda_types::CUasyncCallbackHandle, + ) -> cuda_types::CUresult; + /** \brief Unregisters an async notification callback + + Unregisters \p callback so that the corresponding callback function will stop receiving + async notifications. + + \param device - The device from which to remove \p callback. + \param callback - The callback instance to unregister from receiving async notifications. + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_NOT_SUPPORTED + ::CUDA_ERROR_INVALID_DEVICE + ::CUDA_ERROR_INVALID_VALUE + ::CUDA_ERROR_NOT_PERMITTED + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cuDeviceRegisterAsyncNotification*/ + fn cuDeviceUnregisterAsyncNotification( + device: cuda_types::CUdevice, + callback: cuda_types::CUasyncCallbackHandle, + ) -> cuda_types::CUresult; + /** \brief Returns a handle to a compute device + + Returns in \p *device a device handle given a PCI bus ID string. + + \param dev - Returned device handle + + \param pciBusId - String in one of the following forms: + [domain]:[bus]:[device].[function] + [domain]:[bus]:[device] + [bus]:[device].[function] + where \p domain, \p bus, \p device, and \p function are all hexadecimal values + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGet, + ::cuDeviceGetAttribute, + ::cuDeviceGetPCIBusId, + ::cudaDeviceGetByPCIBusId*/ + fn cuDeviceGetByPCIBusId( + dev: *mut cuda_types::CUdevice, + pciBusId: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + /** \brief Returns a PCI Bus Id string for the device + + Returns an ASCII string identifying the device \p dev in the NULL-terminated + string pointed to by \p pciBusId. \p len specifies the maximum length of the + string that may be returned. + + \param pciBusId - Returned identifier string for the device in the following format + [domain]:[bus]:[device].[function] + where \p domain, \p bus, \p device, and \p function are all hexadecimal values. + pciBusId should be large enough to store 13 characters including the NULL-terminator. + + \param len - Maximum length of string to store in \p name + + \param dev - Device to get identifier string for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuDeviceGet, + ::cuDeviceGetAttribute, + ::cuDeviceGetByPCIBusId, + ::cudaDeviceGetPCIBusId*/ + fn cuDeviceGetPCIBusId( + pciBusId: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Gets an interprocess handle for a previously allocated event + + Takes as input a previously allocated event. This event must have been + created with the ::CU_EVENT_INTERPROCESS and ::CU_EVENT_DISABLE_TIMING + flags set. This opaque handle may be copied into other processes and + opened with ::cuIpcOpenEventHandle to allow efficient hardware + synchronization between GPU work in different processes. 
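A quick round-trip through the two PCI bus-id helpers declared above, mainly to show the 13-byte buffer requirement; illustrative only, return codes ignored:

    unsafe fn pci_roundtrip(dev: cuda_types::CUdevice) -> cuda_types::CUdevice {
        // 13 bytes is the documented minimum: "0000:00:00.0" plus the NUL terminator.
        let mut bus_id = [0 as ::core::ffi::c_char; 13];
        cuDeviceGetPCIBusId(bus_id.as_mut_ptr(), bus_id.len() as ::core::ffi::c_int, dev);
        let mut same_dev: cuda_types::CUdevice = std::mem::zeroed();
        cuDeviceGetByPCIBusId(&mut same_dev, bus_id.as_ptr());
        same_dev
    }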
+ + After the event has been opened in the importing process, + ::cuEventRecord, ::cuEventSynchronize, ::cuStreamWaitEvent and + ::cuEventQuery may be used in either process. Performing operations + on the imported event after the exported event has been freed + with ::cuEventDestroy will result in undefined behavior. + + IPC functionality is restricted to devices with support for unified + addressing on Linux and Windows operating systems. + IPC functionality on Windows is restricted to GPUs in TCC mode + Users can test their device for IPC functionality by calling + ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + \param pHandle - Pointer to a user allocated CUipcEventHandle + in which to return the opaque event handle + \param event - Event allocated with ::CU_EVENT_INTERPROCESS and + ::CU_EVENT_DISABLE_TIMING flags. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_MAP_FAILED, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuEventCreate, + ::cuEventDestroy, + ::cuEventSynchronize, + ::cuEventQuery, + ::cuStreamWaitEvent, + ::cuIpcOpenEventHandle, + ::cuIpcGetMemHandle, + ::cuIpcOpenMemHandle, + ::cuIpcCloseMemHandle, + ::cudaIpcGetEventHandle*/ + fn cuIpcGetEventHandle( + pHandle: *mut cuda_types::CUipcEventHandle, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Opens an interprocess event handle for use in the current process + + Opens an interprocess event handle exported from another process with + ::cuIpcGetEventHandle. This function returns a ::CUevent that behaves like + a locally created event with the ::CU_EVENT_DISABLE_TIMING flag specified. + This event must be freed with ::cuEventDestroy. + + Performing operations on the imported event after the exported event has + been freed with ::cuEventDestroy will result in undefined behavior. + + IPC functionality is restricted to devices with support for unified + addressing on Linux and Windows operating systems. + IPC functionality on Windows is restricted to GPUs in TCC mode + Users can test their device for IPC functionality by calling + ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + \param phEvent - Returns the imported event + \param handle - Interprocess handle to open + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_MAP_FAILED, + ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuEventCreate, + ::cuEventDestroy, + ::cuEventSynchronize, + ::cuEventQuery, + ::cuStreamWaitEvent, + ::cuIpcGetEventHandle, + ::cuIpcGetMemHandle, + ::cuIpcOpenMemHandle, + ::cuIpcCloseMemHandle, + ::cudaIpcOpenEventHandle*/ + fn cuIpcOpenEventHandle( + phEvent: *mut cuda_types::CUevent, + handle: cuda_types::CUipcEventHandle, + ) -> cuda_types::CUresult; + /** \brief Gets an interprocess memory handle for an existing device memory + allocation + + Takes a pointer to the base of an existing device memory allocation created + with ::cuMemAlloc and exports it for use in another process. This is a + lightweight operation and may be called multiple times on an allocation + without adverse effects. + + If a region of memory is freed with ::cuMemFree and a subsequent call + to ::cuMemAlloc returns memory with the same device address, + ::cuIpcGetMemHandle will return a unique handle for the + new memory. + + IPC functionality is restricted to devices with support for unified + addressing on Linux and Windows operating systems. 
+ IPC functionality on Windows is restricted to GPUs in TCC mode + Users can test their device for IPC functionality by calling + ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + \param pHandle - Pointer to user allocated ::CUipcMemHandle to return + the handle in. + \param dptr - Base pointer to previously allocated device memory + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_MAP_FAILED, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuMemAlloc, + ::cuMemFree, + ::cuIpcGetEventHandle, + ::cuIpcOpenEventHandle, + ::cuIpcOpenMemHandle, + ::cuIpcCloseMemHandle, + ::cudaIpcGetMemHandle*/ + fn cuIpcGetMemHandle( + pHandle: *mut cuda_types::CUipcMemHandle, + dptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Opens an interprocess memory handle exported from another process + and returns a device pointer usable in the local process. + + Maps memory exported from another process with ::cuIpcGetMemHandle into + the current device address space. For contexts on different devices + ::cuIpcOpenMemHandle can attempt to enable peer access between the + devices as if the user called ::cuCtxEnablePeerAccess. This behavior is + controlled by the ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag. + ::cuDeviceCanAccessPeer can determine if a mapping is possible. + + Contexts that may open ::CUipcMemHandles are restricted in the following way. + ::CUipcMemHandles from each ::CUdevice in a given process may only be opened + by one ::CUcontext per ::CUdevice per other process. + + If the memory handle has already been opened by the current context, the + reference count on the handle is incremented by 1 and the existing device pointer + is returned. + + Memory returned from ::cuIpcOpenMemHandle must be freed with + ::cuIpcCloseMemHandle. + + Calling ::cuMemFree on an exported memory region before calling + ::cuIpcCloseMemHandle in the importing context will result in undefined + behavior. + + IPC functionality is restricted to devices with support for unified + addressing on Linux and Windows operating systems. + IPC functionality on Windows is restricted to GPUs in TCC mode + Users can test their device for IPC functionality by calling + ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + \param pdptr - Returned device pointer + \param handle - ::CUipcMemHandle to open + \param Flags - Flags for this operation. Must be specified as ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_MAP_FAILED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_TOO_MANY_PEERS, + ::CUDA_ERROR_INVALID_VALUE + + \note No guarantees are made about the address returned in \p *pdptr. + In particular, multiple processes may not receive the same address for the same \p handle. + + \sa + ::cuMemAlloc, + ::cuMemFree, + ::cuIpcGetEventHandle, + ::cuIpcOpenEventHandle, + ::cuIpcGetMemHandle, + ::cuIpcCloseMemHandle, + ::cuCtxEnablePeerAccess, + ::cuDeviceCanAccessPeer, + ::cudaIpcOpenMemHandle*/ + fn cuIpcOpenMemHandle_v2( + pdptr: *mut cuda_types::CUdeviceptr, + handle: cuda_types::CUipcMemHandle, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Attempts to close memory mapped with ::cuIpcOpenMemHandle + + Decrements the reference count of the memory returned by ::cuIpcOpenMemHandle by 1. + When the reference count reaches 0, this API unmaps the memory. 
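The IPC memory calls split across two processes: the owner exports a handle, the peer imports it and eventually closes the mapping. A sketch with the inter-process transport left out, error handling elided, and the lazy-peer-access flag written as its documented value of 1 (CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS):

    // Exporting process: turn a cuMemAlloc'd pointer into a shareable handle.
    unsafe fn export_allocation(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUipcMemHandle {
        let mut handle: cuda_types::CUipcMemHandle = std::mem::zeroed();
        cuIpcGetMemHandle(&mut handle, dptr);
        handle
    }

    // Importing process: map the handle, use the memory, then drop the mapping.
    unsafe fn import_allocation(handle: cuda_types::CUipcMemHandle) -> cuda_types::CUdeviceptr {
        let mut dptr: cuda_types::CUdeviceptr = std::mem::zeroed();
        cuIpcOpenMemHandle_v2(&mut dptr, handle, 1);
        dptr
    }

    unsafe fn release_import(dptr: cuda_types::CUdeviceptr) {
        // Must happen before the exporting process frees the original allocation.
        cuIpcCloseMemHandle(dptr);
    }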
The original allocation + in the exporting process as well as imported mappings in other processes + will be unaffected. + + Any resources used to enable peer access will be freed if this is the + last mapping using them. + + IPC functionality is restricted to devices with support for unified + addressing on Linux and Windows operating systems. + IPC functionality on Windows is restricted to GPUs in TCC mode + Users can test their device for IPC functionality by calling + ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + + \param dptr - Device pointer returned by ::cuIpcOpenMemHandle + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_MAP_FAILED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + \sa + ::cuMemAlloc, + ::cuMemFree, + ::cuIpcGetEventHandle, + ::cuIpcOpenEventHandle, + ::cuIpcGetMemHandle, + ::cuIpcOpenMemHandle, + ::cudaIpcCloseMemHandle*/ + fn cuIpcCloseMemHandle(dptr: cuda_types::CUdeviceptr) -> cuda_types::CUresult; + /** \brief Registers an existing host memory range for use by CUDA + + Page-locks the memory range specified by \p p and \p bytesize and maps it + for the device(s) as specified by \p Flags. This memory range also is added + to the same tracking mechanism as ::cuMemHostAlloc to automatically accelerate + calls to functions such as ::cuMemcpyHtoD(). Since the memory can be accessed + directly by the device, it can be read or written with much higher bandwidth + than pageable memory that has not been registered. Page-locking excessive + amounts of memory may degrade system performance, since it reduces the amount + of memory available to the system for paging. As a result, this function is + best used sparingly to register staging areas for data exchange between + host and device. + + On systems where ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES + is true, ::cuMemHostRegister will not page-lock the memory range specified + by \p ptr but only populate unpopulated pages. + + The \p Flags parameter enables different options to be specified that + affect the allocation, as follows. + + - ::CU_MEMHOSTREGISTER_PORTABLE: The memory returned by this call will be + considered as pinned memory by all CUDA contexts, not just the one that + performed the allocation. + + - ::CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address + space. The device pointer to the memory may be obtained by calling + ::cuMemHostGetDevicePointer(). + + - ::CU_MEMHOSTREGISTER_IOMEMORY: The pointer is treated as pointing to some + I/O memory space, e.g. the PCI Express resource of a 3rd party device. + + - ::CU_MEMHOSTREGISTER_READ_ONLY: The pointer is treated as pointing to memory + that is considered read-only by the device. On platforms without + ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is + required in order to register memory mapped to the CPU as read-only. Support + for the use of this flag can be queried from the device attribute + ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with + a current context associated with a device that does not have this attribute + set will cause ::cuMemHostRegister to error with CUDA_ERROR_NOT_SUPPORTED. + + All of these flags are orthogonal to one another: a developer may page-lock + memory that is portable or mapped with no restrictions. + + The ::CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for + devices that do not support mapped pinned memory. 
The failure is deferred + to ::cuMemHostGetDevicePointer() because the memory may be mapped into + other CUDA contexts via the ::CU_MEMHOSTREGISTER_PORTABLE flag. + + For devices that have a non-zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory + can also be accessed from the device using the host pointer \p p. + The device pointer returned by ::cuMemHostGetDevicePointer() may or may not + match the original host pointer \p ptr and depends on the devices visible to the + application. If all devices visible to the application have a non-zero value for the + device attribute, the device pointer returned by ::cuMemHostGetDevicePointer() + will match the original pointer \p ptr. If any device visible to the application + has a zero value for the device attribute, the device pointer returned by + ::cuMemHostGetDevicePointer() will not match the original host pointer \p ptr, + but it will be suitable for use on all devices provided Unified Virtual Addressing + is enabled. In such systems, it is valid to access the memory using either pointer + on devices that have a non-zero value for the device attribute. Note however that + such devices should access the memory using only of the two pointers and not both. + + The memory page-locked by this function must be unregistered with + ::cuMemHostUnregister(). + + \param p - Host pointer to memory to page-lock + \param bytesize - Size in bytes of the address range to page-lock + \param Flags - Flags for allocation request + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa + ::cuMemHostUnregister, + ::cuMemHostGetFlags, + ::cuMemHostGetDevicePointer, + ::cudaHostRegister*/ + fn cuMemHostRegister_v2( + p: *mut ::core::ffi::c_void, + bytesize: usize, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Unregisters a memory range that was registered with cuMemHostRegister. + + Unmaps the memory range whose base address is specified by \p p, and makes + it pageable again. + + The base address must be the same one specified to ::cuMemHostRegister(). + + \param p - Host pointer to memory to unregister + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED, + \notefnerr + + \sa + ::cuMemHostRegister, + ::cudaHostUnregister*/ + fn cuMemHostUnregister(p: *mut ::core::ffi::c_void) -> cuda_types::CUresult; + /** \brief Copies memory + + Copies data between two pointers. + \p dst and \p src are base pointers of the destination and source, respectively. + \p ByteCount specifies the number of bytes to copy. + Note that this function infers the type of the transfer (host to host, host to + device, device to device, or device to host) from the pointer values. This + function is only allowed in contexts which support unified addressing. 
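+
+ As a minimal illustrative sketch, assuming unified addressing, a current
+ context, and two hypothetical device allocations \p d_src and \p d_dst of
+ \p bytes bytes each, the transfer direction is inferred from the pointers:
+ \code
+CUdeviceptr d_src, d_dst;
+size_t bytes = 1 << 20;
+cuMemAlloc(&d_src, bytes);         // hypothetical source allocation
+cuMemAlloc(&d_dst, bytes);         // hypothetical destination allocation
+cuMemcpy(d_dst, d_src, bytes);     // inferred as a device-to-device copy
+cuMemFree(d_src);
+cuMemFree(d_dst);
+ \endcode
+ Error checking of the returned ::CUresult values is omitted for brevity.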
+ + \param dst - Destination unified virtual address space pointer + \param src - Source unified virtual address space pointer + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy, + ::cudaMemcpyToSymbol, + ::cudaMemcpyFromSymbol*/ + fn cuMemcpy_ptds( + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies device memory between two contexts + + Copies from device memory in one context to device memory in another + context. \p dstDevice is the base device pointer of the destination memory + and \p dstContext is the destination context. \p srcDevice is the base + device pointer of the source memory and \p srcContext is the source pointer. + \p ByteCount specifies the number of bytes to copy. + + \param dstDevice - Destination device pointer + \param dstContext - Destination context + \param srcDevice - Source device pointer + \param srcContext - Source context + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuMemcpyDtoD, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + ::cuMemcpy3DPeerAsync, + ::cudaMemcpyPeer*/ + fn cuMemcpyPeer_ptds( + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Host to Device + + Copies from host memory to device memory. \p dstDevice and \p srcHost are + the base addresses of the destination and source, respectively. \p ByteCount + specifies the number of bytes to copy. 
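+
+ A minimal sketch, assuming a current context and a hypothetical host array
+ \p h_data whose contents are uploaded into a fresh device allocation:
+ \code
+float h_data[256];                              // hypothetical host source
+CUdeviceptr d_data;
+cuMemAlloc(&d_data, sizeof(h_data));
+cuMemcpyHtoD(d_data, h_data, sizeof(h_data));   // synchronous host-to-device copy
+cuMemFree(d_data);
+ \endcode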
+ + \param dstDevice - Destination device pointer + \param srcHost - Source host pointer + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy, + ::cudaMemcpyToSymbol*/ + fn cuMemcpyHtoD_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Device to Host + + Copies from device to host memory. \p dstHost and \p srcDevice specify the + base pointers of the destination and source, respectively. \p ByteCount + specifies the number of bytes to copy. + + \param dstHost - Destination host pointer + \param srcDevice - Source device pointer + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy, + ::cudaMemcpyFromSymbol*/ + fn cuMemcpyDtoH_v2_ptds( + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Device to Device + + Copies from device memory to device memory. \p dstDevice and \p srcDevice + are the base pointers of the destination and source, respectively. + \p ByteCount specifies the number of bytes to copy. 
+ + \param dstDevice - Destination device pointer + \param srcDevice - Source device pointer + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy, + ::cudaMemcpyToSymbol, + ::cudaMemcpyFromSymbol*/ + fn cuMemcpyDtoD_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Device to Array + + Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset + specify the CUDA array handle and starting index of the destination data. + \p srcDevice specifies the base pointer of the source. \p ByteCount + specifies the number of bytes to copy. + + \param dstArray - Destination array + \param dstOffset - Offset in bytes of destination array + \param srcDevice - Source device pointer + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpyToArray*/ + fn cuMemcpyDtoA_v2_ptds( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Array to Device + + Copies from one 1D CUDA array to device memory. \p dstDevice specifies the + base pointer of the destination and must be naturally aligned with the CUDA + array elements. \p srcArray and \p srcOffset specify the CUDA array handle + and the offset in bytes into the array where the copy is to begin. + \p ByteCount specifies the number of bytes to copy and must be evenly + divisible by the array element size. 
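+
+ A minimal sketch, assuming a current context and a hypothetical 1D CUDA
+ array of 256 floats (populated elsewhere, e.g. with ::cuMemcpyHtoA); the
+ byte count is a multiple of the 4-byte element size:
+ \code
+CUDA_ARRAY_DESCRIPTOR desc = {0};
+desc.Width = 256;
+desc.Height = 0;                       // zero height makes this a 1D array
+desc.Format = CU_AD_FORMAT_FLOAT;
+desc.NumChannels = 1;
+CUarray arr;
+cuArrayCreate(&arr, &desc);
+CUdeviceptr d_buf;
+cuMemAlloc(&d_buf, 256 * sizeof(float));
+cuMemcpyAtoD(d_buf, arr, 0, 256 * sizeof(float));   // array -> device memory
+ \endcode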
+ + \param dstDevice - Destination device pointer + \param srcArray - Source array + \param srcOffset - Offset in bytes of source array + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpyFromArray*/ + fn cuMemcpyAtoD_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Host to Array + + Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset + specify the CUDA array handle and starting offset in bytes of the destination + data. \p pSrc specifies the base address of the source. \p ByteCount specifies + the number of bytes to copy. + + \param dstArray - Destination array + \param dstOffset - Offset in bytes of destination array + \param srcHost - Source host pointer + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpyToArray*/ + fn cuMemcpyHtoA_v2_ptds( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Array to Host + + Copies from one 1D CUDA array to host memory. \p dstHost specifies the base + pointer of the destination. \p srcArray and \p srcOffset specify the CUDA + array handle and starting offset in bytes of the source data. + \p ByteCount specifies the number of bytes to copy. 
+ + \param dstHost - Destination device pointer + \param srcArray - Source array + \param srcOffset - Offset in bytes of source array + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpyFromArray*/ + fn cuMemcpyAtoH_v2_ptds( + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Array to Array + + Copies from one 1D CUDA array to another. \p dstArray and \p srcArray + specify the handles of the destination and source CUDA arrays for the copy, + respectively. \p dstOffset and \p srcOffset specify the destination and + source offsets in bytes into the CUDA arrays. \p ByteCount is the number of + bytes to be copied. The size of the elements in the CUDA arrays need not be + the same format, but the elements must be the same size; and count must be + evenly divisible by that size. + + \param dstArray - Destination array + \param dstOffset - Offset in bytes of destination array + \param srcArray - Source array + \param srcOffset - Offset in bytes of source array + \param ByteCount - Size of memory copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpyArrayToArray*/ + fn cuMemcpyAtoA_v2_ptds( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + ) -> cuda_types::CUresult; + /** \brief Copies memory for 2D arrays + + Perform a 2D memory copy according to the parameters specified in \p pCopy. 
+ The ::CUDA_MEMCPY2D structure is defined as: + + \code +typedef struct CUDA_MEMCPY2D_st { +unsigned int srcXInBytes, srcY; +CUmemorytype srcMemoryType; +const void *srcHost; +CUdeviceptr srcDevice; +CUarray srcArray; +unsigned int srcPitch; + +unsigned int dstXInBytes, dstY; +CUmemorytype dstMemoryType; +void *dstHost; +CUdeviceptr dstDevice; +CUarray dstArray; +unsigned int dstPitch; + +unsigned int WidthInBytes; +unsigned int Height; +} CUDA_MEMCPY2D; + \endcode + where: + - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + source and destination, respectively; ::CUmemorytype_enum is defined as: + + \code +typedef enum CUmemorytype_enum { +CU_MEMORYTYPE_HOST = 0x01, +CU_MEMORYTYPE_DEVICE = 0x02, +CU_MEMORYTYPE_ARRAY = 0x03, +CU_MEMORYTYPE_UNIFIED = 0x04 +} CUmemorytype; + \endcode + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::srcArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + specify the (host) base address of the source data and the bytes per row to + apply. ::srcArray is ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + specify the (device) base address of the source data and the bytes per row + to apply. ::srcArray is ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + specify the (host) base address of the destination data and the bytes per + row to apply. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::dstArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + specify the (device) base address of the destination data and the bytes per + row to apply. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + ignored. + + - ::srcXInBytes and ::srcY specify the base address of the source data for + the copy. + + \par + For host pointers, the starting address is + \code +void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + \endcode + + \par + For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + element size. + + - ::dstXInBytes and ::dstY specify the base address of the destination data + for the copy. + + \par + For host pointers, the base address is + \code +void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + \endcode + + \par + For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + element size. 
+ + - ::WidthInBytes and ::Height specify the width (in bytes) and height of + the 2D copy being performed. + - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + ::srcXInBytes, and ::dstPitch must be greater than or equal to + ::WidthInBytes + dstXInBytes. + + \par + ::cuMemcpy2D() returns an error if any pitch is greater than the maximum + allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + (device to device, CUDA array to device, CUDA array to CUDA array), + ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). + ::cuMemcpy2DUnaligned() does not have this restriction, but may run + significantly slower in the cases where ::cuMemcpy2D() would have returned + an error code. + + \param pCopy - Parameters for the memory copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy2D, + ::cudaMemcpy2DToArray, + ::cudaMemcpy2DFromArray*/ + fn cuMemcpy2D_v2_ptds( + pCopy: *const cuda_types::CUDA_MEMCPY2D, + ) -> cuda_types::CUresult; + /** \brief Copies memory for 2D arrays + + Perform a 2D memory copy according to the parameters specified in \p pCopy. + The ::CUDA_MEMCPY2D structure is defined as: + + \code +typedef struct CUDA_MEMCPY2D_st { +unsigned int srcXInBytes, srcY; +CUmemorytype srcMemoryType; +const void *srcHost; +CUdeviceptr srcDevice; +CUarray srcArray; +unsigned int srcPitch; +unsigned int dstXInBytes, dstY; +CUmemorytype dstMemoryType; +void *dstHost; +CUdeviceptr dstDevice; +CUarray dstArray; +unsigned int dstPitch; +unsigned int WidthInBytes; +unsigned int Height; +} CUDA_MEMCPY2D; + \endcode + where: + - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + source and destination, respectively; ::CUmemorytype_enum is defined as: + + \code +typedef enum CUmemorytype_enum { +CU_MEMORYTYPE_HOST = 0x01, +CU_MEMORYTYPE_DEVICE = 0x02, +CU_MEMORYTYPE_ARRAY = 0x03, +CU_MEMORYTYPE_UNIFIED = 0x04 +} CUmemorytype; + \endcode + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::srcArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + specify the (host) base address of the source data and the bytes per row to + apply. ::srcArray is ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + specify the (device) base address of the source data and the bytes per row + to apply. 
::srcArray is ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::dstArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + specify the (host) base address of the destination data and the bytes per + row to apply. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + specify the (device) base address of the destination data and the bytes per + row to apply. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + ignored. + + - ::srcXInBytes and ::srcY specify the base address of the source data for + the copy. + + \par + For host pointers, the starting address is + \code +void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + \endcode + + \par + For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + element size. + + - ::dstXInBytes and ::dstY specify the base address of the destination data + for the copy. + + \par + For host pointers, the base address is + \code +void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + \endcode + + \par + For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + element size. + + - ::WidthInBytes and ::Height specify the width (in bytes) and height of + the 2D copy being performed. + - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + ::srcXInBytes, and ::dstPitch must be greater than or equal to + ::WidthInBytes + dstXInBytes. + + \par + ::cuMemcpy2D() returns an error if any pitch is greater than the maximum + allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + (device to device, CUDA array to device, CUDA array to CUDA array), + ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). + ::cuMemcpy2DUnaligned() does not have this restriction, but may run + significantly slower in the cases where ::cuMemcpy2D() would have returned + an error code. 
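+
+ A minimal sketch of a host-to-device 2D copy, assuming a current context,
+ a hypothetical pitched device allocation from ::cuMemAllocPitch, and a
+ hypothetical tightly packed host image of \p width x \p height floats:
+ \code
+size_t width = 64, height = 64, pitch;
+float *h_img = (float *)malloc(width * height * sizeof(float));  // hypothetical host image
+CUdeviceptr d_img;
+cuMemAllocPitch(&d_img, &pitch, width * sizeof(float), height, sizeof(float));
+CUDA_MEMCPY2D cp = {0};
+cp.srcMemoryType = CU_MEMORYTYPE_HOST;
+cp.srcHost       = h_img;
+cp.srcPitch      = width * sizeof(float);
+cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+cp.dstDevice     = d_img;
+cp.dstPitch      = pitch;
+cp.WidthInBytes  = width * sizeof(float);
+cp.Height        = height;
+cuMemcpy2DUnaligned(&cp);
+ \endcode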
+ + \param pCopy - Parameters for the memory copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy2D, + ::cudaMemcpy2DToArray, + ::cudaMemcpy2DFromArray*/ + fn cuMemcpy2DUnaligned_v2_ptds( + pCopy: *const cuda_types::CUDA_MEMCPY2D, + ) -> cuda_types::CUresult; + /** \brief Copies memory for 3D arrays + + Perform a 3D memory copy according to the parameters specified in + \p pCopy. The ::CUDA_MEMCPY3D structure is defined as: + + \code +typedef struct CUDA_MEMCPY3D_st { + +unsigned int srcXInBytes, srcY, srcZ; +unsigned int srcLOD; +CUmemorytype srcMemoryType; +const void *srcHost; +CUdeviceptr srcDevice; +CUarray srcArray; +unsigned int srcPitch; // ignored when src is array +unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 + +unsigned int dstXInBytes, dstY, dstZ; +unsigned int dstLOD; +CUmemorytype dstMemoryType; +void *dstHost; +CUdeviceptr dstDevice; +CUarray dstArray; +unsigned int dstPitch; // ignored when dst is array +unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 + +unsigned int WidthInBytes; +unsigned int Height; +unsigned int Depth; +} CUDA_MEMCPY3D; + \endcode + where: + - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + source and destination, respectively; ::CUmemorytype_enum is defined as: + + \code +typedef enum CUmemorytype_enum { +CU_MEMORYTYPE_HOST = 0x01, +CU_MEMORYTYPE_DEVICE = 0x02, +CU_MEMORYTYPE_ARRAY = 0x03, +CU_MEMORYTYPE_UNIFIED = 0x04 +} CUmemorytype; + \endcode + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::srcArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and + ::srcHeight specify the (host) base address of the source data, the bytes + per row, and the height of each 2D slice of the 3D array. ::srcArray is + ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and + ::srcHeight specify the (device) base address of the source data, the bytes + per row, and the height of each 2D slice of the 3D array. ::srcArray is + ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and + ::srcHeight are ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::dstArray is ignored. 
+ This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + specify the (host) base address of the destination data, the bytes per row, + and the height of each 2D slice of the 3D array. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + specify the (device) base address of the destination data, the bytes per + row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and + ::dstHeight are ignored. + + - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source + data for the copy. + + \par + For host pointers, the starting address is + \code +void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; + \endcode + + \par + For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + element size. + + - dstXInBytes, ::dstY and ::dstZ specify the base address of the + destination data for the copy. + + \par + For host pointers, the base address is + \code +void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; + \endcode + + \par + For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + element size. + + - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height + and depth of the 3D copy being performed. + - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + ::srcXInBytes, and ::dstPitch must be greater than or equal to + ::WidthInBytes + dstXInBytes. + - If specified, ::srcHeight must be greater than or equal to ::Height + + ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + + \par + ::cuMemcpy3D() returns an error if any pitch is greater than the maximum + allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). + + The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be + set to 0. 
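+
+ A minimal sketch of a device-to-host 3D copy, assuming a current context
+ and hypothetical tightly packed volumes of \p W x \p H x \p D bytes:
+ \code
+size_t W = 32, H = 32, D = 8;
+CUdeviceptr d_vol;
+cuMemAlloc(&d_vol, W * H * D);
+unsigned char *h_vol = (unsigned char *)malloc(W * H * D);  // hypothetical host volume
+CUDA_MEMCPY3D cp = {0};                 // zero-init keeps srcLOD/dstLOD at 0
+cp.srcMemoryType = CU_MEMORYTYPE_DEVICE;
+cp.srcDevice = d_vol;
+cp.srcPitch  = W;                       // bytes per row
+cp.srcHeight = H;                       // rows per 2D slice
+cp.dstMemoryType = CU_MEMORYTYPE_HOST;
+cp.dstHost   = h_vol;
+cp.dstPitch  = W;
+cp.dstHeight = H;
+cp.WidthInBytes = W;
+cp.Height = H;
+cp.Depth  = D;
+cuMemcpy3D(&cp);
+ \endcode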
+ + \param pCopy - Parameters for the memory copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMemcpy3D*/ + fn cuMemcpy3D_v2_ptds( + pCopy: *const cuda_types::CUDA_MEMCPY3D, + ) -> cuda_types::CUresult; + /** \brief Copies memory between contexts + + Perform a 3D memory copy according to the parameters specified in + \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure + for documentation of its parameters. + + \param pCopy - Parameters for the memory copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_sync + + \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + ::cuMemcpy3DPeerAsync, + ::cudaMemcpy3DPeer*/ + fn cuMemcpy3DPeer_ptds( + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, + ) -> cuda_types::CUresult; + /** \brief Copies memory asynchronously + + Copies data between two pointers. + \p dst and \p src are base pointers of the destination and source, respectively. + \p ByteCount specifies the number of bytes to copy. + Note that this function infers the type of the transfer (host to host, host to + device, device to device, or device to host) from the pointer values. This + function is only allowed in contexts which support unified addressing. 
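+
+ A minimal sketch, assuming unified addressing, a current context, and two
+ hypothetical device allocations \p d_src and \p d_dst of \p bytes bytes;
+ the copy is enqueued on a stream and awaited when the result is needed:
+ \code
+CUstream stream;
+cuStreamCreate(&stream, CU_STREAM_DEFAULT);
+cuMemcpyAsync(d_dst, d_src, bytes, stream);   // enqueue, returns immediately
+cuStreamSynchronize(stream);                  // wait for the copy to finish
+cuStreamDestroy(stream);
+ \endcode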
+ + \param dst - Destination unified virtual address space pointer + \param src - Source unified virtual address space pointer + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpyAsync, + ::cudaMemcpyToSymbolAsync, + ::cudaMemcpyFromSymbolAsync*/ + fn cuMemcpyAsync_ptsz( + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies device memory between two contexts asynchronously. + + Copies from device memory in one context to device memory in another + context. \p dstDevice is the base device pointer of the destination memory + and \p dstContext is the destination context. \p srcDevice is the base + device pointer of the source memory and \p srcContext is the source pointer. + \p ByteCount specifies the number of bytes to copy. + + \param dstDevice - Destination device pointer + \param dstContext - Destination context + \param srcDevice - Source device pointer + \param srcContext - Source context + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, + ::cuMemcpy3DPeerAsync, + ::cudaMemcpyPeerAsync*/ + fn cuMemcpyPeerAsync_ptsz( + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Host to Device + + Copies from host memory to device memory. \p dstDevice and \p srcHost are + the base addresses of the destination and source, respectively. \p ByteCount + specifies the number of bytes to copy. 
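+
+ A minimal sketch, assuming a current context and a hypothetical stream
+ \p stream; for the copy to overlap with other work the host buffer should
+ be page-locked, e.g. allocated with ::cuMemAllocHost:
+ \code
+void *h_buf;
+cuMemAllocHost(&h_buf, bytes);                    // pinned host staging buffer
+CUdeviceptr d_buf;
+cuMemAlloc(&d_buf, bytes);
+cuMemcpyHtoDAsync(d_buf, h_buf, bytes, stream);   // bytes/stream: hypothetical
+ \endcode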
+ + \param dstDevice - Destination device pointer + \param srcHost - Source host pointer + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpyAsync, + ::cudaMemcpyToSymbolAsync*/ + fn cuMemcpyHtoDAsync_v2_ptsz( + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Device to Host + + Copies from device to host memory. \p dstHost and \p srcDevice specify the + base pointers of the destination and source, respectively. \p ByteCount + specifies the number of bytes to copy. + + \param dstHost - Destination host pointer + \param srcDevice - Source device pointer + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpyAsync, + ::cudaMemcpyFromSymbolAsync*/ + fn cuMemcpyDtoHAsync_v2_ptsz( + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Device to Device + + Copies from device memory to device memory. \p dstDevice and \p srcDevice + are the base pointers of the destination and source, respectively. + \p ByteCount specifies the number of bytes to copy. 
+ + \param dstDevice - Destination device pointer + \param srcDevice - Source device pointer + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpyAsync, + ::cudaMemcpyToSymbolAsync, + ::cudaMemcpyFromSymbolAsync*/ + fn cuMemcpyDtoDAsync_v2_ptsz( + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Host to Array + + Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset + specify the CUDA array handle and starting offset in bytes of the + destination data. \p srcHost specifies the base address of the source. + \p ByteCount specifies the number of bytes to copy. + + \param dstArray - Destination array + \param dstOffset - Offset in bytes of destination array + \param srcHost - Source host pointer + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpyToArrayAsync*/ + fn cuMemcpyHtoAAsync_v2_ptsz( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory from Array to Host + + Copies from one 1D CUDA array to host memory. \p dstHost specifies the base + pointer of the destination. 
\p srcArray and \p srcOffset specify the CUDA + array handle and starting offset in bytes of the source data. + \p ByteCount specifies the number of bytes to copy. + + \param dstHost - Destination pointer + \param srcArray - Source array + \param srcOffset - Offset in bytes of source array + \param ByteCount - Size of memory copy in bytes + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + \note_memcpy + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpyFromArrayAsync*/ + fn cuMemcpyAtoHAsync_v2_ptsz( + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory for 2D arrays + + Perform a 2D memory copy according to the parameters specified in \p pCopy. + The ::CUDA_MEMCPY2D structure is defined as: + + \code +typedef struct CUDA_MEMCPY2D_st { +unsigned int srcXInBytes, srcY; +CUmemorytype srcMemoryType; +const void *srcHost; +CUdeviceptr srcDevice; +CUarray srcArray; +unsigned int srcPitch; +unsigned int dstXInBytes, dstY; +CUmemorytype dstMemoryType; +void *dstHost; +CUdeviceptr dstDevice; +CUarray dstArray; +unsigned int dstPitch; +unsigned int WidthInBytes; +unsigned int Height; +} CUDA_MEMCPY2D; + \endcode + where: + - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + source and destination, respectively; ::CUmemorytype_enum is defined as: + + \code +typedef enum CUmemorytype_enum { +CU_MEMORYTYPE_HOST = 0x01, +CU_MEMORYTYPE_DEVICE = 0x02, +CU_MEMORYTYPE_ARRAY = 0x03, +CU_MEMORYTYPE_UNIFIED = 0x04 +} CUmemorytype; + \endcode + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + specify the (host) base address of the source data and the bytes per row to + apply. ::srcArray is ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::srcArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + specify the (device) base address of the source data and the bytes per row + to apply. ::srcArray is ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + ignored. 
+ + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::dstArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + specify the (host) base address of the destination data and the bytes per + row to apply. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + specify the (device) base address of the destination data and the bytes per + row to apply. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + ignored. + + - ::srcXInBytes and ::srcY specify the base address of the source data for + the copy. + + \par + For host pointers, the starting address is + \code +void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + \endcode + + \par + For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + element size. + + - ::dstXInBytes and ::dstY specify the base address of the destination data + for the copy. + + \par + For host pointers, the base address is + \code +void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + \endcode + + \par + For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + element size. + + - ::WidthInBytes and ::Height specify the width (in bytes) and height of + the 2D copy being performed. + - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + ::srcXInBytes, and ::dstPitch must be greater than or equal to + ::WidthInBytes + dstXInBytes. + - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + ::srcXInBytes, and ::dstPitch must be greater than or equal to + ::WidthInBytes + dstXInBytes. + - If specified, ::srcHeight must be greater than or equal to ::Height + + ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + + \par + ::cuMemcpy2DAsync() returns an error if any pitch is greater than the maximum + allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + (device to device, CUDA array to device, CUDA array to CUDA array), + ::cuMemcpy2DAsync() may fail for pitches not computed by ::cuMemAllocPitch(). 
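+
+ A minimal sketch, assuming a ::CUDA_MEMCPY2D structure \p cp filled in
+ exactly as for ::cuMemcpy2D and a hypothetical existing stream \p stream:
+ \code
+cuMemcpy2DAsync(&cp, stream);    // enqueue the 2D copy on the stream
+cuStreamSynchronize(stream);     // wait once the copied data is needed
+ \endcode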
+ + \param pCopy - Parameters for the memory copy + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpy2DAsync, + ::cudaMemcpy2DToArrayAsync, + ::cudaMemcpy2DFromArrayAsync*/ + fn cuMemcpy2DAsync_v2_ptsz( + pCopy: *const cuda_types::CUDA_MEMCPY2D, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory for 3D arrays + + Perform a 3D memory copy according to the parameters specified in + \p pCopy. The ::CUDA_MEMCPY3D structure is defined as: + + \code +typedef struct CUDA_MEMCPY3D_st { + +unsigned int srcXInBytes, srcY, srcZ; +unsigned int srcLOD; +CUmemorytype srcMemoryType; +const void *srcHost; +CUdeviceptr srcDevice; +CUarray srcArray; +unsigned int srcPitch; // ignored when src is array +unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 + +unsigned int dstXInBytes, dstY, dstZ; +unsigned int dstLOD; +CUmemorytype dstMemoryType; +void *dstHost; +CUdeviceptr dstDevice; +CUarray dstArray; +unsigned int dstPitch; // ignored when dst is array +unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 + +unsigned int WidthInBytes; +unsigned int Height; +unsigned int Depth; +} CUDA_MEMCPY3D; + \endcode + where: + - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + source and destination, respectively; ::CUmemorytype_enum is defined as: + + \code +typedef enum CUmemorytype_enum { +CU_MEMORYTYPE_HOST = 0x01, +CU_MEMORYTYPE_DEVICE = 0x02, +CU_MEMORYTYPE_ARRAY = 0x03, +CU_MEMORYTYPE_UNIFIED = 0x04 +} CUmemorytype; + \endcode + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::srcArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and + ::srcHeight specify the (host) base address of the source data, the bytes + per row, and the height of each 2D slice of the 3D array. ::srcArray is + ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and + ::srcHeight specify the (device) base address of the source data, the bytes + per row, and the height of each 2D slice of the 3D array. ::srcArray is + ignored. + + \par + If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + handle of the source data. 
::srcHost, ::srcDevice, ::srcPitch and + ::srcHeight are ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + specify the (unified virtual address space) base address of the source data + and the bytes per row to apply. ::dstArray is ignored. + This value may be used only if unified addressing is supported in the calling + context. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + specify the (host) base address of the destination data, the bytes per row, + and the height of each 2D slice of the 3D array. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + specify the (device) base address of the destination data, the bytes per + row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. + + \par + If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and + ::dstHeight are ignored. + + - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source + data for the copy. + + \par + For host pointers, the starting address is + \code +void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; + \endcode + + \par + For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + element size. + + - dstXInBytes, ::dstY and ::dstZ specify the base address of the + destination data for the copy. + + \par + For host pointers, the base address is + \code +void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); + \endcode + + \par + For device pointers, the starting address is + \code +CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; + \endcode + + \par + For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + element size. + + - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height + and depth of the 3D copy being performed. + - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + ::srcXInBytes, and ::dstPitch must be greater than or equal to + ::WidthInBytes + dstXInBytes. + - If specified, ::srcHeight must be greater than or equal to ::Height + + ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + + \par + ::cuMemcpy3DAsync() returns an error if any pitch is greater than the maximum + allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). + + The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be + set to 0. 
+ + \param pCopy - Parameters for the memory copy + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemcpy3DAsync*/ + fn cuMemcpy3DAsync_v2_ptsz( + pCopy: *const cuda_types::CUDA_MEMCPY3D, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Copies memory between contexts asynchronously. + + Perform a 3D memory copy according to the parameters specified in + \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure + for documentation of its parameters. + + \param pCopy - Parameters for the memory copy + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + ::cuMemcpy3DPeerAsync, + ::cudaMemcpy3DPeerAsync*/ + fn cuMemcpy3DPeerAsync_ptsz( + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Initializes device memory + + Sets the memory range of \p N 8-bit values to the specified value + \p uc. 
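+
+ \par
+ A small sketch for illustration only (not from the upstream documentation):
+ zero-fill a freshly allocated buffer. Note that \p N counts 8-bit elements,
+ i.e. bytes; error checking is omitted:
+ \code
+CUdeviceptr dptr;
+cuMemAlloc(&dptr, 4096);       // allocate 4096 bytes
+cuMemsetD8(dptr, 0x00, 4096);  // fill all 4096 bytes with zero
+cuMemFree(dptr);
+ \endcode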
+ + \param dstDevice - Destination device pointer + \param uc - Value to set + \param N - Number of elements + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset*/ + fn cuMemsetD8_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, + ) -> cuda_types::CUresult; + /** \brief Initializes device memory + + Sets the memory range of \p N 16-bit values to the specified value + \p us. The \p dstDevice pointer must be two byte aligned. + + \param dstDevice - Destination device pointer + \param us - Value to set + \param N - Number of elements + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset*/ + fn cuMemsetD16_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, + ) -> cuda_types::CUresult; + /** \brief Initializes device memory + + Sets the memory range of \p N 32-bit values to the specified value + \p ui. The \p dstDevice pointer must be four byte aligned. 
+ + \param dstDevice - Destination device pointer + \param ui - Value to set + \param N - Number of elements + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32Async, + ::cudaMemset*/ + fn cuMemsetD32_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, + ) -> cuda_types::CUresult; + /** \brief Initializes device memory + + Sets the 2D memory range of \p Width 8-bit values to the specified value + \p uc. \p Height specifies the number of rows to set, and \p dstPitch + specifies the number of bytes between each row. This function performs + fastest when the pitch is one that has been passed back by + ::cuMemAllocPitch(). + + \param dstDevice - Destination device pointer + \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + \param uc - Value to set + \param Width - Width of row + \param Height - Number of rows + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset2D*/ + fn cuMemsetD2D8_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, + ) -> cuda_types::CUresult; + /** \brief Initializes device memory + + Sets the 2D memory range of \p Width 16-bit values to the specified value + \p us. \p Height specifies the number of rows to set, and \p dstPitch + specifies the number of bytes between each row. The \p dstDevice pointer + and \p dstPitch offset must be two byte aligned. This function performs + fastest when the pitch is one that has been passed back by + ::cuMemAllocPitch(). 
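+
+ \par
+ An illustrative sketch (not part of the upstream documentation): clear a
+ pitched 2D allocation of 16-bit elements. \p widthElems and \p heightRows are
+ assumed, and the pitch returned by ::cuMemAllocPitch() is passed straight back
+ to the memset:
+ \code
+CUdeviceptr dptr;
+size_t pitch;
+// width in bytes = widthElems * sizeof(unsigned short); element size hint = 4
+cuMemAllocPitch(&dptr, &pitch, widthElems * sizeof(unsigned short), heightRows, 4);
+cuMemsetD2D16(dptr, pitch, 0, widthElems, heightRows); // Width is in 16-bit elements
+ \endcode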
+ + \param dstDevice - Destination device pointer + \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + \param us - Value to set + \param Width - Width of row + \param Height - Number of rows + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset2D*/ + fn cuMemsetD2D16_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, + ) -> cuda_types::CUresult; + /** \brief Initializes device memory + + Sets the 2D memory range of \p Width 32-bit values to the specified value + \p ui. \p Height specifies the number of rows to set, and \p dstPitch + specifies the number of bytes between each row. The \p dstDevice pointer + and \p dstPitch offset must be four byte aligned. This function performs + fastest when the pitch is one that has been passed back by + ::cuMemAllocPitch(). + + \param dstDevice - Destination device pointer + \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + \param ui - Value to set + \param Width - Width of row + \param Height - Number of rows + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset2D*/ + fn cuMemsetD2D32_v2_ptds( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, + ) -> cuda_types::CUresult; + /** \brief Sets device memory + + Sets the memory range of \p N 8-bit values to the specified value + \p uc. 
+ + \param dstDevice - Destination device pointer + \param uc - Value to set + \param N - Number of elements + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemsetAsync*/ + fn cuMemsetD8Async_ptsz( + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Sets device memory + + Sets the memory range of \p N 16-bit values to the specified value + \p us. The \p dstDevice pointer must be two byte aligned. + + \param dstDevice - Destination device pointer + \param us - Value to set + \param N - Number of elements + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemsetAsync*/ + fn cuMemsetD16Async_ptsz( + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Sets device memory + + Sets the memory range of \p N 32-bit values to the specified value + \p ui. The \p dstDevice pointer must be four byte aligned. 
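+
+ \par
+ For illustration only (\p dptr, \p numElems and \p hStream are assumed to be
+ set up by the caller): enqueue the fill on a stream and synchronize only if
+ the host needs to observe the result immediately:
+ \code
+cuMemsetD32Async(dptr, 0xDEADBEEF, numElems, hStream); // numElems 32-bit words
+cuStreamSynchronize(hStream);                          // optional host-side wait
+ \endcode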
+ + \param dstDevice - Destination device pointer + \param ui - Value to set + \param N - Number of elements + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32, + ::cudaMemsetAsync*/ + fn cuMemsetD32Async_ptsz( + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Sets device memory + + Sets the 2D memory range of \p Width 8-bit values to the specified value + \p uc. \p Height specifies the number of rows to set, and \p dstPitch + specifies the number of bytes between each row. This function performs + fastest when the pitch is one that has been passed back by + ::cuMemAllocPitch(). + + \param dstDevice - Destination device pointer + \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + \param uc - Value to set + \param Width - Width of row + \param Height - Number of rows + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset2DAsync*/ + fn cuMemsetD2D8Async_ptsz( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Sets device memory + + Sets the 2D memory range of \p Width 16-bit values to the specified value + \p us. \p Height specifies the number of rows to set, and \p dstPitch + specifies the number of bytes between each row. The \p dstDevice pointer + and \p dstPitch offset must be two byte aligned. 
This function performs + fastest when the pitch is one that has been passed back by + ::cuMemAllocPitch(). + + \param dstDevice - Destination device pointer + \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + \param us - Value to set + \param Width - Width of row + \param Height - Number of rows + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset2DAsync*/ + fn cuMemsetD2D16Async_ptsz( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Sets device memory + + Sets the 2D memory range of \p Width 32-bit values to the specified value + \p ui. \p Height specifies the number of rows to set, and \p dstPitch + specifies the number of bytes between each row. The \p dstDevice pointer + and \p dstPitch offset must be four byte aligned. This function performs + fastest when the pitch is one that has been passed back by + ::cuMemAllocPitch(). 
+ + \param dstDevice - Destination device pointer + \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + \param ui - Value to set + \param Width - Width of row + \param Height - Number of rows + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + \note_memset + \note_null_stream + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, + ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + ::cuMemsetD32, ::cuMemsetD32Async, + ::cudaMemset2DAsync*/ + fn cuMemsetD2D32Async_ptsz( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Creates a 1D or 2D CUDA array + + Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure + \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. + The ::CUDA_ARRAY_DESCRIPTOR is defined as: + + \code +typedef struct { +unsigned int Width; +unsigned int Height; +CUarray_format Format; +unsigned int NumChannels; +} CUDA_ARRAY_DESCRIPTOR; + \endcode + where: + + - \p Width, and \p Height are the width, and height of the CUDA array (in + elements); the CUDA array is one-dimensional if height is 0, two-dimensional + otherwise; + - ::Format specifies the format of the elements; ::CUarray_format is + defined as: + \code +typedef enum CUarray_format_enum { +CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, +CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, +CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, +CU_AD_FORMAT_SIGNED_INT8 = 0x08, +CU_AD_FORMAT_SIGNED_INT16 = 0x09, +CU_AD_FORMAT_SIGNED_INT32 = 0x0a, +CU_AD_FORMAT_HALF = 0x10, +CU_AD_FORMAT_FLOAT = 0x20 +} CUarray_format; + \endcode + - \p NumChannels specifies the number of packed components per CUDA array + element; it may be 1, 2, or 4; + + Here are examples of CUDA array descriptions: + + Description for a CUDA array of 2048 floats: + \code +CUDA_ARRAY_DESCRIPTOR desc; +desc.Format = CU_AD_FORMAT_FLOAT; +desc.NumChannels = 1; +desc.Width = 2048; +desc.Height = 1; + \endcode + + Description for a 64 x 64 CUDA array of floats: + \code +CUDA_ARRAY_DESCRIPTOR desc; +desc.Format = CU_AD_FORMAT_FLOAT; +desc.NumChannels = 1; +desc.Width = 64; +desc.Height = 64; + \endcode + + Description for a \p width x \p height CUDA array of 64-bit, 4x16-bit + float16's: + \code +CUDA_ARRAY_DESCRIPTOR desc; +desc.Format = CU_AD_FORMAT_HALF; +desc.NumChannels = 4; +desc.Width = width; +desc.Height = height; + \endcode + + Description for a \p width x \p height CUDA array of 16-bit elements, each + of which is two 8-bit unsigned chars: + \code +CUDA_ARRAY_DESCRIPTOR arrayDesc; +desc.Format = CU_AD_FORMAT_UNSIGNED_INT8; +desc.NumChannels = 2; +desc.Width = width; 
+desc.Height = height; + \endcode + + \param pHandle - Returned array + \param pAllocateArray - Array descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMallocArray*/ + fn cuArrayCreate_v2( + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR, + ) -> cuda_types::CUresult; + /** \brief Get a 1D or 2D CUDA array descriptor + + Returns in \p *pArrayDescriptor a descriptor containing information on the + format and dimensions of the CUDA array \p hArray. It is useful for + subroutines that have been passed a CUDA array, but need to know the CUDA + array parameters for validation or other purposes. + + \param pArrayDescriptor - Returned array descriptor + \param hArray - Array to get descriptor of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaArrayGetInfo*/ + fn cuArrayGetDescriptor_v2( + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR, + hArray: cuda_types::CUarray, + ) -> cuda_types::CUresult; + /** \brief Returns the layout properties of a sparse CUDA array + + Returns the layout properties of a sparse CUDA array in \p sparseProperties + If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_SPARSE + ::CUDA_ERROR_INVALID_VALUE will be returned. + + If the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, + then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize represents the total size of the array. Otherwise, it will be zero. + Also, the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is always zero. + Note that the \p array must have been allocated using ::cuArrayCreate or ::cuArray3DCreate. For CUDA arrays obtained + using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. 
Instead, ::cuMipmappedArrayGetSparseProperties + must be used to obtain the sparse properties of the entire CUDA mipmapped array to which \p array belongs to. + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_INVALID_VALUE + + \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES + \param[in] array - CUDA array to get the sparse properties of + \sa ::cuMipmappedArrayGetSparseProperties, ::cuMemMapArrayAsync*/ + fn cuArrayGetSparseProperties( + sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES, + array: cuda_types::CUarray, + ) -> cuda_types::CUresult; + /** \brief Returns the layout properties of a sparse CUDA mipmapped array + + Returns the sparse array layout properties in \p sparseProperties + If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_SPARSE + ::CUDA_ERROR_INVALID_VALUE will be returned. + + For non-layered CUDA mipmapped arrays, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize returns the + size of the mip tail region. The mip tail region includes all mip levels whose width, height or depth + is less than that of the tile. + For layered CUDA mipmapped arrays, if ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, + then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies the size of the mip tail of all layers combined. + Otherwise, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies mip tail size per layer. + The returned value of ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is valid only if ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize is non-zero. + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_INVALID_VALUE + + \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES + \param[in] mipmap - CUDA mipmapped array to get the sparse properties of + \sa ::cuArrayGetSparseProperties, ::cuMemMapArrayAsync*/ + fn cuMipmappedArrayGetSparseProperties( + sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES, + mipmap: cuda_types::CUmipmappedArray, + ) -> cuda_types::CUresult; + /** \brief Returns the memory requirements of a CUDA array + + Returns the memory requirements of a CUDA array in \p memoryRequirements + If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_DEFERRED_MAPPING + ::CUDA_ERROR_INVALID_VALUE will be returned. + + The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::size + represents the total size of the CUDA array. + The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::alignment + represents the alignment necessary for mapping the CUDA array. + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_INVALID_VALUE + + \param[out] memoryRequirements - Pointer to ::CUDA_ARRAY_MEMORY_REQUIREMENTS + \param[in] array - CUDA array to get the memory requirements of + \param[in] device - Device to get the memory requirements for + \sa ::cuMipmappedArrayGetMemoryRequirements, ::cuMemMapArrayAsync*/ + fn cuArrayGetMemoryRequirements( + memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS, + array: cuda_types::CUarray, + device: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Returns the memory requirements of a CUDA mipmapped array + + Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements + If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_DEFERRED_MAPPING + ::CUDA_ERROR_INVALID_VALUE will be returned. + + The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::size + represents the total size of the CUDA mipmapped array. 
+ The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::alignment + represents the alignment necessary for mapping the CUDA mipmapped + array. + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_INVALID_VALUE + + \param[out] memoryRequirements - Pointer to ::CUDA_ARRAY_MEMORY_REQUIREMENTS + \param[in] mipmap - CUDA mipmapped array to get the memory requirements of + \param[in] device - Device to get the memory requirements for + \sa ::cuArrayGetMemoryRequirements, ::cuMemMapArrayAsync*/ + fn cuMipmappedArrayGetMemoryRequirements( + memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS, + mipmap: cuda_types::CUmipmappedArray, + device: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Gets a CUDA array plane from a CUDA array + + Returns in \p pPlaneArray a CUDA array that represents a single format plane + of the CUDA array \p hArray. + + If \p planeIdx is greater than the maximum number of planes in this array or if the array does + not have a multi-planar format e.g: ::CU_AD_FORMAT_NV12, then ::CUDA_ERROR_INVALID_VALUE is returned. + + Note that if the \p hArray has format ::CU_AD_FORMAT_NV12, then passing in 0 for \p planeIdx returns + a CUDA array of the same size as \p hArray but with one channel and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. + If 1 is passed for \p planeIdx, then the returned CUDA array has half the height and width + of \p hArray with two channels and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. + + \param pPlaneArray - Returned CUDA array referenced by the \p planeIdx + \param hArray - Multiplanar CUDA array + \param planeIdx - Plane index + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa + ::cuArrayCreate, + ::cudaArrayGetPlane*/ + fn cuArrayGetPlane( + pPlaneArray: *mut cuda_types::CUarray, + hArray: cuda_types::CUarray, + planeIdx: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Destroys a CUDA array + + Destroys the CUDA array \p hArray. + + \param hArray - Array to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ARRAY_IS_MAPPED, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + \notefnerr + + \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaFreeArray*/ + fn cuArrayDestroy(hArray: cuda_types::CUarray) -> cuda_types::CUresult; + /** \brief Creates a 3D CUDA array + + Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure + \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. 
+ The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: + + \code +typedef struct { +unsigned int Width; +unsigned int Height; +unsigned int Depth; +CUarray_format Format; +unsigned int NumChannels; +unsigned int Flags; +} CUDA_ARRAY3D_DESCRIPTOR; + \endcode + where: + + - \p Width, \p Height, and \p Depth are the width, height, and depth of the + CUDA array (in elements); the following types of CUDA arrays can be allocated: + - A 1D array is allocated if \p Height and \p Depth extents are both zero. + - A 2D array is allocated if only \p Depth extent is zero. + - A 3D array is allocated if all three extents are non-zero. + - A 1D layered CUDA array is allocated if only \p Height is zero and the + ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number + of layers is determined by the depth extent. + - A 2D layered CUDA array is allocated if all three extents are non-zero and + the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number + of layers is determined by the depth extent. + - A cubemap CUDA array is allocated if all three extents are non-zero and the + ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and + \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, + where the six layers represent the six faces of a cube. The order of the six + layers in memory is the same as that listed in ::CUarray_cubemap_face. + - A cubemap layered CUDA array is allocated if all three extents are non-zero, + and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. + \p Width must be equal to \p Height, and \p Depth must be a multiple of six. + A cubemap layered CUDA array is a special type of 2D layered CUDA array that + consists of a collection of cubemaps. The first six layers represent the first + cubemap, the next six layers form the second cubemap, and so on. + + - ::Format specifies the format of the elements; ::CUarray_format is + defined as: + \code +typedef enum CUarray_format_enum { +CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, +CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, +CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, +CU_AD_FORMAT_SIGNED_INT8 = 0x08, +CU_AD_FORMAT_SIGNED_INT16 = 0x09, +CU_AD_FORMAT_SIGNED_INT32 = 0x0a, +CU_AD_FORMAT_HALF = 0x10, +CU_AD_FORMAT_FLOAT = 0x20 +} CUarray_format; + \endcode + + - \p NumChannels specifies the number of packed components per CUDA array + element; it may be 1, 2, or 4; + + - ::Flags may be set to + - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays. If this flag is set, + \p Depth specifies the number of layers, not the depth of a 3D array. + - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to the CUDA array. + If this flag is not set, ::cuSurfRefSetArray will fail when attempting to bind the CUDA array + to a surface reference. + - ::CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this flag is set, \p Width must be + equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set, + then \p Depth must be a multiple of six. + - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array will be used for texture gather. + Texture gather can only be performed on 2D CUDA arrays. + + \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table. + All values are specified in elements. Note that for brevity's sake, the full name of the device attribute + is not specified. For ex., TEXTURE1D_WIDTH refers to the device attribute + ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH. 
+
+ Note that 2D CUDA arrays have different size requirements if the ::CUDA_ARRAY3D_TEXTURE_GATHER flag
+ is set. \p Width and \p Height must not be greater than ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
+ and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT respectively, in that case.
+
+ Valid extents are listed per CUDA array type as
+ {(width range in elements), (height range), (depth range)};
+ the first set must always be met, the second applies when ::CUDA_ARRAY3D_SURFACE_LDST is set:
+
+ - 1D:
+     always:            { (1,TEXTURE1D_WIDTH), 0, 0 }
+     with SURFACE_LDST: { (1,SURFACE1D_WIDTH), 0, 0 }
+ - 2D:
+     always:            { (1,TEXTURE2D_WIDTH), (1,TEXTURE2D_HEIGHT), 0 }
+     with SURFACE_LDST: { (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }
+ - 3D:
+     always:            { (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
+                        OR { (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE), (1,TEXTURE3D_DEPTH_ALTERNATE) }
+     with SURFACE_LDST: { (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT), (1,SURFACE3D_DEPTH) }
+ - 1D Layered:
+     always:            { (1,TEXTURE1D_LAYERED_WIDTH), 0, (1,TEXTURE1D_LAYERED_LAYERS) }
+     with SURFACE_LDST: { (1,SURFACE1D_LAYERED_WIDTH), 0, (1,SURFACE1D_LAYERED_LAYERS) }
+ - 2D Layered:
+     always:            { (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT), (1,TEXTURE2D_LAYERED_LAYERS) }
+     with SURFACE_LDST: { (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT), (1,SURFACE2D_LAYERED_LAYERS) }
+ - Cubemap:
+     always:            { (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }
+     with SURFACE_LDST: { (1,SURFACECUBEMAP_WIDTH), (1,SURFACECUBEMAP_WIDTH), 6 }
+ - Cubemap Layered:
+     always:            { (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_LAYERS) }
+     with SURFACE_LDST: { (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_LAYERS) }
+ + Here are examples of CUDA array descriptions: + + Description for a CUDA array of 2048 floats: + \code +CUDA_ARRAY3D_DESCRIPTOR desc; +desc.Format = CU_AD_FORMAT_FLOAT; +desc.NumChannels = 1; +desc.Width = 2048; +desc.Height = 0; +desc.Depth = 0; + \endcode + + Description for a 64 x 64 CUDA array of floats: + \code +CUDA_ARRAY3D_DESCRIPTOR desc; +desc.Format = CU_AD_FORMAT_FLOAT; +desc.NumChannels = 1; +desc.Width = 64; +desc.Height = 64; +desc.Depth = 0; + \endcode + + Description for a \p width x \p height x \p depth CUDA array of 64-bit, + 4x16-bit float16's: + \code +CUDA_ARRAY3D_DESCRIPTOR desc; +desc.Format = CU_AD_FORMAT_HALF; +desc.NumChannels = 4; +desc.Width = width; +desc.Height = height; +desc.Depth = depth; + \endcode + + \param pHandle - Returned array + \param pAllocateArray - 3D array descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuArray3DGetDescriptor, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaMalloc3DArray*/ + fn cuArray3DCreate_v2( + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR, + ) -> cuda_types::CUresult; + /** \brief Get a 3D CUDA array descriptor + + Returns in \p *pArrayDescriptor a descriptor containing information on the + format and dimensions of the CUDA array \p hArray. It is useful for + subroutines that have been passed a CUDA array, but need to know the CUDA + array parameters for validation or other purposes. + + This function may be called on 1D and 2D arrays, in which case the \p Height + and/or \p Depth members of the descriptor struct will be set to 0. 
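+
+ \par
+ A short sketch for illustration (assuming \p hArray is an existing CUDA array
+ handle): query the descriptor and inspect the extents; for an array created
+ through ::cuArrayCreate(), \p Depth reads back as 0:
+ \code
+CUDA_ARRAY3D_DESCRIPTOR desc;
+cuArray3DGetDescriptor(&desc, hArray);
+// desc.Width, desc.Height and desc.Depth now describe the array;
+// Height and/or Depth are 0 for lower-dimensional arrays
+ \endcode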
+ + \param pArrayDescriptor - Returned 3D array descriptor + \param hArray - 3D array to get descriptor of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + \notefnerr + + \sa ::cuArray3DCreate, ::cuArrayCreate, + ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + ::cudaArrayGetInfo*/ + fn cuArray3DGetDescriptor_v2( + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR, + hArray: cuda_types::CUarray, + ) -> cuda_types::CUresult; + /** \brief Creates a CUDA mipmapped array + + Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure + \p pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in \p *pHandle. + \p numMipmapLevels specifies the number of mipmap levels to be allocated. This value is + clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]. + + The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: + + \code +typedef struct { +unsigned int Width; +unsigned int Height; +unsigned int Depth; +CUarray_format Format; +unsigned int NumChannels; +unsigned int Flags; +} CUDA_ARRAY3D_DESCRIPTOR; + \endcode + where: + + - \p Width, \p Height, and \p Depth are the width, height, and depth of the + CUDA array (in elements); the following types of CUDA arrays can be allocated: + - A 1D mipmapped array is allocated if \p Height and \p Depth extents are both zero. + - A 2D mipmapped array is allocated if only \p Depth extent is zero. + - A 3D mipmapped array is allocated if all three extents are non-zero. + - A 1D layered CUDA mipmapped array is allocated if only \p Height is zero and the + ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number + of layers is determined by the depth extent. + - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and + the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number + of layers is determined by the depth extent. + - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the + ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and + \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, + where the six layers represent the six faces of a cube. The order of the six + layers in memory is the same as that listed in ::CUarray_cubemap_face. + - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, + and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. + \p Width must be equal to \p Height, and \p Depth must be a multiple of six. + A cubemap layered CUDA array is a special type of 2D layered CUDA array that + consists of a collection of cubemaps. The first six layers represent the first + cubemap, the next six layers form the second cubemap, and so on. 
+
+ - ::Format specifies the format of the elements; ::CUarray_format is
+ defined as:
+ \code
+typedef enum CUarray_format_enum {
+CU_AD_FORMAT_UNSIGNED_INT8 = 0x01,
+CU_AD_FORMAT_UNSIGNED_INT16 = 0x02,
+CU_AD_FORMAT_UNSIGNED_INT32 = 0x03,
+CU_AD_FORMAT_SIGNED_INT8 = 0x08,
+CU_AD_FORMAT_SIGNED_INT16 = 0x09,
+CU_AD_FORMAT_SIGNED_INT32 = 0x0a,
+CU_AD_FORMAT_HALF = 0x10,
+CU_AD_FORMAT_FLOAT = 0x20
+} CUarray_format;
+ \endcode
+
+ - \p NumChannels specifies the number of packed components per CUDA array
+ element; it may be 1, 2, or 4;
+
+ - ::Flags may be set to
+ - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set,
+ \p Depth specifies the number of layers, not the depth of a 3D array.
+ - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of
+ the CUDA mipmapped array. If this flag is not set, ::cuSurfRefSetArray will fail when attempting to
+ bind a mipmap level of the CUDA mipmapped array to a surface reference.
+ - ::CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, \p Width must be
+ equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set,
+ then \p Depth must be a multiple of six.
+ - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather.
+ Texture gather can only be performed on 2D CUDA mipmapped arrays.
+
+ \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table.
+ All values are specified in elements. Note that for brevity's sake, the full name of the device attribute
+ is not specified. For ex., TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute
+ ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH.
+
+ Valid extents are listed per CUDA mipmapped array type as
+ {(width range in elements), (height range), (depth range)};
+ the first set must always be met, the second applies when ::CUDA_ARRAY3D_SURFACE_LDST is set:
+
+ - 1D:
+     always:            { (1,TEXTURE1D_MIPMAPPED_WIDTH), 0, 0 }
+     with SURFACE_LDST: { (1,SURFACE1D_WIDTH), 0, 0 }
+ - 2D:
+     always:            { (1,TEXTURE2D_MIPMAPPED_WIDTH), (1,TEXTURE2D_MIPMAPPED_HEIGHT), 0 }
+     with SURFACE_LDST: { (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }
+ - 3D:
+     always:            { (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
+                        OR { (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE), (1,TEXTURE3D_DEPTH_ALTERNATE) }
+     with SURFACE_LDST: { (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT), (1,SURFACE3D_DEPTH) }
+ - 1D Layered:
+     always:            { (1,TEXTURE1D_LAYERED_WIDTH), 0, (1,TEXTURE1D_LAYERED_LAYERS) }
+     with SURFACE_LDST: { (1,SURFACE1D_LAYERED_WIDTH), 0, (1,SURFACE1D_LAYERED_LAYERS) }
+ - 2D Layered:
+     always:            { (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT), (1,TEXTURE2D_LAYERED_LAYERS) }
+     with SURFACE_LDST: { (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT), (1,SURFACE2D_LAYERED_LAYERS) }
+ - Cubemap:
+     always:            { (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }
+     with SURFACE_LDST: { (1,SURFACECUBEMAP_WIDTH), (1,SURFACECUBEMAP_WIDTH), 6 }
+ - Cubemap Layered:
+     always:            { (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_LAYERS) }
+     with SURFACE_LDST: { (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_LAYERS) }
+ + + \param pHandle - Returned mipmapped array + \param pMipmappedArrayDesc - mipmapped array descriptor + \param numMipmapLevels - Number of mipmap levels + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cuMipmappedArrayDestroy, + ::cuMipmappedArrayGetLevel, + ::cuArrayCreate, + ::cudaMallocMipmappedArray*/ + fn cuMipmappedArrayCreate( + pHandle: *mut cuda_types::CUmipmappedArray, + pMipmappedArrayDesc: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR, + numMipmapLevels: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Gets a mipmap level of a CUDA mipmapped array + + Returns in \p *pLevelArray a CUDA array that represents a single mipmap level + of the CUDA mipmapped array \p hMipmappedArray. + + If \p level is greater than the maximum number of levels in this mipmapped array, + ::CUDA_ERROR_INVALID_VALUE is returned. + + \param pLevelArray - Returned mipmap level CUDA array + \param hMipmappedArray - CUDA mipmapped array + \param level - Mipmap level + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa + ::cuMipmappedArrayCreate, + ::cuMipmappedArrayDestroy, + ::cuArrayCreate, + ::cudaGetMipmappedArrayLevel*/ + fn cuMipmappedArrayGetLevel( + pLevelArray: *mut cuda_types::CUarray, + hMipmappedArray: cuda_types::CUmipmappedArray, + level: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Destroys a CUDA mipmapped array + + Destroys the CUDA mipmapped array \p hMipmappedArray. + + \param hMipmappedArray - Mipmapped array to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ARRAY_IS_MAPPED, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + \notefnerr + + \sa + ::cuMipmappedArrayCreate, + ::cuMipmappedArrayGetLevel, + ::cuArrayCreate, + ::cudaFreeMipmappedArray*/ + fn cuMipmappedArrayDestroy( + hMipmappedArray: cuda_types::CUmipmappedArray, + ) -> cuda_types::CUresult; + /** \brief Retrieve handle for an address range + + Get a handle of the specified type to an address range. The address range + must have been obtained by a prior call to either ::cuMemAlloc or ::cuMemAddressReserve. + If the address range was obtained via ::cuMemAddressReserve, it must also be fully mapped via ::cuMemMap. + The address range must have been obtained by a prior call to either ::cuMemAllocHost or + ::cuMemHostAlloc on Tegra. + + Users must ensure the \p dptr and \p size are aligned to the host page size. + + When requesting CUmemRangeHandleType::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, + users are expected to query for dma_buf support for the platform + by using ::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED device attribute before calling + this API. The \p handle will be interpreted as a pointer to an integer to store the dma_buf file descriptor. + Users must ensure the entire address range is backed and mapped when + the address range is allocated by ::cuMemAddressReserve. All the physical + allocations backing the address range must be resident on the same device and + have identical allocation properties. 
Users are also expected to retrieve a + new handle every time the underlying physical allocation(s) corresponding + to a previously queried VA range are changed. + + \param[out] handle - Pointer to the location where the returned handle will be stored. + \param[in] dptr - Pointer to a valid CUDA device allocation. Must be aligned to host page size. + \param[in] size - Length of the address range. Must be aligned to host page size. + \param[in] handleType - Type of handle requested (defines type and size of the \p handle output parameter) + \param[in] flags - Reserved, must be zero + + \return + CUDA_SUCCESS + CUDA_ERROR_INVALID_VALUE + CUDA_ERROR_NOT_SUPPORTED*/ + fn cuMemGetHandleForAddressRange( + handle: *mut ::core::ffi::c_void, + dptr: cuda_types::CUdeviceptr, + size: usize, + handleType: cuda_types::CUmemRangeHandleType, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Allocate an address range reservation. + + Reserves a virtual address range based on the given parameters, giving + the starting address of the range in \p ptr. This API requires a system that + supports UVA. The size and address parameters must be a multiple of the + host page size and the alignment must be a power of two or zero for default + alignment. + + \param[out] ptr - Resulting pointer to start of virtual address range allocated + \param[in] size - Size of the reserved virtual address range requested + \param[in] alignment - Alignment of the reserved virtual address range requested + \param[in] addr - Fixed starting address range requested + \param[in] flags - Currently unused, must be zero + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemAddressFree*/ + fn cuMemAddressReserve( + ptr: *mut cuda_types::CUdeviceptr, + size: usize, + alignment: usize, + addr: cuda_types::CUdeviceptr, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Free an address range reservation. + + Frees a virtual address range reserved by cuMemAddressReserve. The size + must match what was given to memAddressReserve and the ptr given must + match what was returned from memAddressReserve. + + \param[in] ptr - Starting address of the virtual address range to free + \param[in] size - Size of the virtual address region to free + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemAddressReserve*/ + fn cuMemAddressFree( + ptr: cuda_types::CUdeviceptr, + size: usize, + ) -> cuda_types::CUresult; + /** \brief Create a CUDA memory handle representing a memory allocation of a given size described by the given properties + + This creates a memory allocation on the target device specified through the + \p prop structure. The created allocation will not have any device or host + mappings. The generic memory \p handle for the allocation can be + mapped to the address space of calling process via ::cuMemMap. This handle + cannot be transmitted directly to other processes (see + ::cuMemExportToShareableHandle). On Windows, the caller must also pass + an LPSECURITYATTRIBUTE in \p prop to be associated with this handle which + limits or allows access to this handle for a recipient process (see + ::CUmemAllocationProp::win32HandleMetaData for more). 
The \p size of this + allocation must be a multiple of the the value given via + ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM + flag. + To create a CPU allocation targeting a specific host NUMA node, applications must + set ::CUmemAllocationProp::CUmemLocation::type to ::CU_MEM_LOCATION_TYPE_HOST_NUMA and + ::CUmemAllocationProp::CUmemLocation::id must specify the NUMA ID of the CPU. + On systems where NUMA is not available ::CUmemAllocationProp::CUmemLocation::id must be set to 0. + + Applications can set ::CUmemAllocationProp::requestedHandleTypes to + ::CU_MEM_HANDLE_TYPE_FABRIC in order to create allocations suitable for sharing + within an IMEX domain. An IMEX domain is either an OS instance or a group of securely + connected OS instances using the NVIDIA IMEX daemon. An IMEX channel is a global resource + within the IMEX domain that represents a logical entity that aims to provide fine grained + accessibility control for the participating processes. When exporter and importer CUDA processes + have been granted access to the same IMEX channel, they can securely share memory. + If the allocating process does not have access setup for an IMEX channel, attempting to create + a ::CUmemGenericAllocationHandle with ::CU_MEM_HANDLE_TYPE_FABRIC will result in ::CUDA_ERROR_NOT_PERMITTED. + The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels. + + If ::CUmemAllocationProp::allocFlags::usage contains ::CU_MEM_CREATE_USAGE_TILE_POOL flag then + the memory allocation is intended only to be used as backing tile pool for sparse CUDA arrays + and sparse CUDA mipmapped arrays. + (see ::cuMemMapArrayAsync). + + \param[out] handle - Value of handle returned. All operations on this allocation are to be performed using this handle. + \param[in] size - Size of the allocation requested + \param[in] prop - Properties of the allocation to create. + \param[in] flags - flags for future use, must be zero now. + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle*/ + fn cuMemCreate( + handle: *mut cuda_types::CUmemGenericAllocationHandle, + size: usize, + prop: *const cuda_types::CUmemAllocationProp, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate. + + Frees the memory that was allocated on a device through cuMemCreate. + + The memory allocation will be freed when all outstanding mappings to the memory + are unmapped and when all outstanding references to the handle (including it's + shareable counterparts) are also released. The generic memory handle can be + freed when there are still outstanding mappings made with this handle. Each + time a recipient process imports a shareable handle, it needs to pair it with + ::cuMemRelease for the handle to be freed. If \p handle is not a valid handle + the behavior is undefined. + + \param[in] handle Value of handle which was returned previously by cuMemCreate. 
+ \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuMemCreate*/ + fn cuMemRelease( + handle: cuda_types::CUmemGenericAllocationHandle, + ) -> cuda_types::CUresult; + /** \brief Maps an allocation handle to a reserved virtual address range. + + Maps bytes of memory represented by \p handle starting from byte \p offset to + \p size to address range [\p addr, \p addr + \p size]. This range must be an + address reservation previously reserved with ::cuMemAddressReserve, and + \p offset + \p size must be less than the size of the memory allocation. + Both \p ptr, \p size, and \p offset must be a multiple of the value given via + ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. + If \p handle represents a multicast object, \p ptr, \p size and \p offset must + be aligned to the value returned by ::cuMulticastGetGranularity with the flag + ::CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is + recommended that \p ptr, \p size and \p offset be aligned to the value + returned by ::cuMulticastGetGranularity with the flag + ::CU_MULTICAST_RECOMMENDED_GRANULARITY. + + Please note calling ::cuMemMap does not make the address accessible, + the caller needs to update accessibility of a contiguous mapped VA + range by calling ::cuMemSetAccess. + + Once a recipient process obtains a shareable memory handle + from ::cuMemImportFromShareableHandle, the process must + use ::cuMemMap to map the memory into its address ranges before + setting accessibility with ::cuMemSetAccess. + + ::cuMemMap can only create mappings on VA range reservations + that are not currently mapped. + + \param[in] ptr - Address where memory will be mapped. + \param[in] size - Size of the memory mapping. + \param[in] offset - Offset into the memory represented by + - \p handle from which to start mapping + - Note: currently must be zero. + \param[in] handle - Handle to a shareable memory + \param[in] flags - flags for future use, must be zero now. + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle*/ + fn cuMemMap( + ptr: cuda_types::CUdeviceptr, + size: usize, + offset: usize, + handle: cuda_types::CUmemGenericAllocationHandle, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays + + Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays. + Each operation is specified by a ::CUarrayMapInfo entry in the \p mapInfoList array of size \p count. 
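Before the CUarrayMapInfo details, here is how the pieces documented so far fit together for ordinary (non-sparse) memory: a handle from cuMemCreate is mapped into a reservation with cuMemMap and then made accessible with cuMemSetAccess (documented later in this file). A sketch, with the helper name illustrative and cleanup on the error paths kept minimal:
\code
#include <cuda.h>
#include <string.h>

/* Map an existing physical handle into a fresh VA reservation and make it
   read/write accessible from device 0. `size` must already be a multiple of
   the granularity used when the handle was created. */
static CUresult map_handle(CUmemGenericAllocationHandle handle, size_t size,
                           CUdeviceptr *out_va)
{
    CUdeviceptr va = 0;
    CUresult st = cuMemAddressReserve(&va, size, 0, 0, 0);
    if (st != CUDA_SUCCESS) return st;

    st = cuMemMap(va, size, 0 /*offset*/, handle, 0);
    if (st != CUDA_SUCCESS) { cuMemAddressFree(va, size); return st; }

    CUmemAccessDesc access;
    memset(&access, 0, sizeof(access));
    access.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    access.location.id = 0;
    access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    st = cuMemSetAccess(va, size, &access, 1);
    if (st != CUDA_SUCCESS) {
        cuMemUnmap(va, size);
        cuMemAddressFree(va, size);
        return st;
    }

    *out_va = va;   /* tear down later with cuMemUnmap + cuMemAddressFree */
    return CUDA_SUCCESS;
}
\endcode
Teardown runs the same steps in reverse: cuMemUnmap, cuMemRelease on the handle, then cuMemAddressFree on the reservation.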
+ The structure ::CUarrayMapInfo is defined as follow: +\code +typedef struct CUarrayMapInfo_st { +CUresourcetype resourceType; +union { +CUmipmappedArray mipmap; +CUarray array; +} resource; + +CUarraySparseSubresourceType subresourceType; +union { +struct { +unsigned int level; +unsigned int layer; +unsigned int offsetX; +unsigned int offsetY; +unsigned int offsetZ; +unsigned int extentWidth; +unsigned int extentHeight; +unsigned int extentDepth; +} sparseLevel; +struct { +unsigned int layer; +unsigned long long offset; +unsigned long long size; +} miptail; +} subresource; + +CUmemOperationType memOperationType; + +CUmemHandleType memHandleType; +union { +CUmemGenericAllocationHandle memHandle; +} memHandle; + +unsigned long long offset; +unsigned int deviceBitMask; +unsigned int flags; +unsigned int reserved[2]; +} CUarrayMapInfo; +\endcode + + where ::CUarrayMapInfo::resourceType specifies the type of resource to be operated on. + If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_ARRAY then + ::CUarrayMapInfo::resource::array must be set to a valid sparse CUDA array handle. + The CUDA array must be either a 2D, 2D layered or 3D CUDA array and must have been allocated using + ::cuArrayCreate or ::cuArray3DCreate with the flag ::CUDA_ARRAY3D_SPARSE + or ::CUDA_ARRAY3D_DEFERRED_MAPPING. + For CUDA arrays obtained using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. + If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + then ::CUarrayMapInfo::resource::mipmap must be set to a valid sparse CUDA mipmapped array handle. + The CUDA mipmapped array must be either a 2D, 2D layered or 3D CUDA mipmapped array and must have been + allocated using ::cuMipmappedArrayCreate with the flag ::CUDA_ARRAY3D_SPARSE + or ::CUDA_ARRAY3D_DEFERRED_MAPPING. + + ::CUarrayMapInfo::subresourceType specifies the type of subresource within the resource. + ::CUarraySparseSubresourceType_enum is defined as: +\code +typedef enum CUarraySparseSubresourceType_enum { +CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, +CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 +} CUarraySparseSubresourceType; +\endcode + + where ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL indicates a + sparse-miplevel which spans at least one tile in every dimension. The remaining miplevels which + are too small to span at least one tile in any dimension constitute the mip tail region as indicated by + ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL subresource type. + + If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL + then ::CUarrayMapInfo::subresource::sparseLevel struct must contain valid array subregion offsets and extents. + The ::CUarrayMapInfo::subresource::sparseLevel::offsetX, ::CUarrayMapInfo::subresource::sparseLevel::offsetY + and ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must specify valid X, Y and Z offsets respectively. + The ::CUarrayMapInfo::subresource::sparseLevel::extentWidth, ::CUarrayMapInfo::subresource::sparseLevel::extentHeight + and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth must specify valid width, height and depth extents respectively. + These offsets and extents must be aligned to the corresponding tile dimension. + For CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::level must specify a valid mip level index. Otherwise, + must be zero. 
+ For layered CUDA arrays and layered CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::layer must specify a valid layer index. Otherwise, + must be zero. + ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must be zero and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth + must be set to 1 for 2D and 2D layered CUDA arrays and CUDA mipmapped arrays. + Tile extents can be obtained by calling ::cuArrayGetSparseProperties and ::cuMipmappedArrayGetSparseProperties + + If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL + then ::CUarrayMapInfo::subresource::miptail struct must contain valid mip tail offset in + ::CUarrayMapInfo::subresource::miptail::offset and size in ::CUarrayMapInfo::subresource::miptail::size. + Both, mip tail offset and mip tail size must be aligned to the tile size. + For layered CUDA mipmapped arrays which don't have the flag ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL set in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags + as returned by ::cuMipmappedArrayGetSparseProperties, ::CUarrayMapInfo::subresource::miptail::layer must specify a valid layer index. + Otherwise, must be zero. + + If ::CUarrayMapInfo::resource::array or ::CUarrayMapInfo::resource::mipmap was created with ::CUDA_ARRAY3D_DEFERRED_MAPPING + flag set the ::CUarrayMapInfo::subresourceType and the contents of ::CUarrayMapInfo::subresource will be ignored. + + ::CUarrayMapInfo::memOperationType specifies the type of operation. ::CUmemOperationType is defined as: +\code +typedef enum CUmemOperationType_enum { +CU_MEM_OPERATION_TYPE_MAP = 1, +CU_MEM_OPERATION_TYPE_UNMAP = 2 +} CUmemOperationType; +\endcode + If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP then the subresource + will be mapped onto the tile pool memory specified by ::CUarrayMapInfo::memHandle at offset ::CUarrayMapInfo::offset. + The tile pool allocation has to be created by specifying the ::CU_MEM_CREATE_USAGE_TILE_POOL flag when calling ::cuMemCreate. Also, + ::CUarrayMapInfo::memHandleType must be set to ::CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC. + + If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_UNMAP then an unmapping operation + is performed. ::CUarrayMapInfo::memHandle must be NULL. + + ::CUarrayMapInfo::deviceBitMask specifies the list of devices that must map or unmap physical memory. + Currently, this mask must have exactly one bit set, and the corresponding device must match the device associated with the stream. + If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP, the device must also match + the device associated with the tile pool memory allocation as specified by ::CUarrayMapInfo::memHandle. + + ::CUarrayMapInfo::flags and ::CUarrayMapInfo::reserved[] are unused and must be set to zero. 
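A sketch of a single map operation following the rules above: one tile-aligned subregion of a sparse CUDA array is bound to a tile pool. The array, tile-pool handle, stream, and tile extents (normally taken from cuArrayGetSparseProperties) are assumed to exist already; the helper name is illustrative, and the public entry point cuMemMapArrayAsync corresponds to the _ptsz variant declared below:
\code
#include <cuda.h>
#include <string.h>

/* Map one tile-aligned subregion of mip level 0 of a 2D sparse CUDA array
   onto a tile-pool allocation created with CU_MEM_CREATE_USAGE_TILE_POOL. */
static CUresult map_sparse_tile(CUarray sparseArray,
                                CUmemGenericAllocationHandle tilePool,
                                unsigned long long poolOffset,
                                unsigned int tileW, unsigned int tileH,
                                CUstream stream)
{
    CUarrayMapInfo info;
    memset(&info, 0, sizeof(info));
    info.resourceType = CU_RESOURCE_TYPE_ARRAY;
    info.resource.array = sparseArray;
    info.subresourceType = CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL;
    info.subresource.sparseLevel.level = 0;
    info.subresource.sparseLevel.offsetX = 0;      /* tile-aligned offsets */
    info.subresource.sparseLevel.offsetY = 0;
    info.subresource.sparseLevel.extentWidth = tileW;
    info.subresource.sparseLevel.extentHeight = tileH;
    info.subresource.sparseLevel.extentDepth = 1;  /* 2D array */
    info.memOperationType = CU_MEM_OPERATION_TYPE_MAP;
    info.memHandleType = CU_MEM_HANDLE_TYPE_GENERIC;
    info.memHandle.memHandle = tilePool;
    info.offset = poolOffset;                      /* offset into tile pool */
    info.deviceBitMask = 1u << 0;                  /* device 0 only */
    return cuMemMapArrayAsync(&info, 1, stream);
}
\endcode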
+ + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + + \param[in] mapInfoList - List of ::CUarrayMapInfo + \param[in] count - Count of ::CUarrayMapInfo in \p mapInfoList + \param[in] hStream - Stream identifier for the stream to use for map or unmap operations + + \sa ::cuMipmappedArrayCreate, ::cuArrayCreate, ::cuArray3DCreate, ::cuMemCreate, ::cuArrayGetSparseProperties, ::cuMipmappedArrayGetSparseProperties*/ + fn cuMemMapArrayAsync_ptsz( + mapInfoList: *mut cuda_types::CUarrayMapInfo, + count: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Unmap the backing memory of a given address range. + + The range must be the entire contiguous address range that was mapped to. In + other words, ::cuMemUnmap cannot unmap a sub-range of an address range mapped + by ::cuMemCreate / ::cuMemMap. Any backing memory allocations will be freed + if there are no existing mappings and there are no unreleased memory handles. + + When ::cuMemUnmap returns successfully the address range is converted to an + address reservation and can be used for a future calls to ::cuMemMap. Any new + mapping to this virtual address will need to have access granted through + ::cuMemSetAccess, as all mappings start with no accessibility setup. + + \param[in] ptr - Starting address for the virtual address range to unmap + \param[in] size - Size of the virtual address range to unmap + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + \note_sync + + \sa ::cuMemCreate, ::cuMemAddressReserve*/ + fn cuMemUnmap(ptr: cuda_types::CUdeviceptr, size: usize) -> cuda_types::CUresult; + /** \brief Set the access flags for each location specified in \p desc for the given virtual address range + + Given the virtual address range via \p ptr and \p size, and the locations + in the array given by \p desc and \p count, set the access flags for the + target locations. The range must be a fully mapped address range + containing all allocations created by ::cuMemMap / ::cuMemCreate. + Users cannot specify ::CU_MEM_LOCATION_TYPE_HOST_NUMA accessibility for allocations created on with other location types. + Note: When ::CUmemAccessDesc::CUmemLocation::type is ::CU_MEM_LOCATION_TYPE_HOST_NUMA, ::CUmemAccessDesc::CUmemLocation::id + is ignored. + When setting the access flags for a virtual address range mapping a multicast + object, \p ptr and \p size must be aligned to the value returned by + ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_MINIMUM_GRANULARITY. + For best performance however, it is recommended that \p ptr and \p size be + aligned to the value returned by ::cuMulticastGetGranularity with the flag + ::CU_MULTICAST_RECOMMENDED_GRANULARITY. 
+ + \param[in] ptr - Starting address for the virtual address range + \param[in] size - Length of the virtual address range + \param[in] desc - Array of ::CUmemAccessDesc that describe how to change the + - mapping for each location specified + \param[in] count - Number of ::CUmemAccessDesc in \p desc + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + \note_sync + + \sa ::cuMemSetAccess, ::cuMemCreate, :cuMemMap*/ + fn cuMemSetAccess( + ptr: cuda_types::CUdeviceptr, + size: usize, + desc: *const cuda_types::CUmemAccessDesc, + count: usize, + ) -> cuda_types::CUresult; + /** \brief Get the access \p flags set for the given \p location and \p ptr + + \param[out] flags - Flags set for this location + \param[in] location - Location in which to check the flags for + \param[in] ptr - Address in which to check the access flags for + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemSetAccess*/ + fn cuMemGetAccess( + flags: *mut ::core::ffi::c_ulonglong, + location: *const cuda_types::CUmemLocation, + ptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Exports an allocation to a requested shareable handle type + + Given a CUDA memory handle, create a shareable memory + allocation handle that can be used to share the memory with other + processes. The recipient process can convert the shareable handle back into a + CUDA memory handle using ::cuMemImportFromShareableHandle and map + it with ::cuMemMap. The implementation of what this handle is and how it + can be transferred is defined by the requested handle type in \p handleType + + Once all shareable handles are closed and the allocation is released, the allocated + memory referenced will be released back to the OS and uses of the CUDA handle afterward + will lead to undefined behavior. + + This API can also be used in conjunction with other APIs (e.g. Vulkan, OpenGL) + that support importing memory from the shareable type + + \param[out] shareableHandle - Pointer to the location in which to store the requested handle type + \param[in] handle - CUDA handle for the memory allocation + \param[in] handleType - Type of shareable handle requested (defines type and size of the \p shareableHandle output parameter) + \param[in] flags - Reserved, must be zero + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemImportFromShareableHandle*/ + fn cuMemExportToShareableHandle( + shareableHandle: *mut ::core::ffi::c_void, + handle: cuda_types::CUmemGenericAllocationHandle, + handleType: cuda_types::CUmemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Imports an allocation from a requested shareable handle type. + + If the current process cannot support the memory described by this shareable + handle, this API will error as ::CUDA_ERROR_NOT_SUPPORTED. + + If \p shHandleType is ::CU_MEM_HANDLE_TYPE_FABRIC and the importer process has not been + granted access to the same IMEX channel as the exporter process, this API will error + as ::CUDA_ERROR_NOT_PERMITTED. 
+ + \note Importing shareable handles exported from some graphics APIs(VUlkan, OpenGL, etc) + created on devices under an SLI group may not be supported, and thus this API will + return CUDA_ERROR_NOT_SUPPORTED. + There is no guarantee that the contents of \p handle will be the same CUDA memory handle + for the same given OS shareable handle, or the same underlying allocation. + + \param[out] handle - CUDA Memory handle for the memory allocation. + \param[in] osHandle - Shareable Handle representing the memory allocation that is to be imported. + \param[in] shHandleType - handle type of the exported handle ::CUmemAllocationHandleType. + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease*/ + fn cuMemImportFromShareableHandle( + handle: *mut cuda_types::CUmemGenericAllocationHandle, + osHandle: *mut ::core::ffi::c_void, + shHandleType: cuda_types::CUmemAllocationHandleType, + ) -> cuda_types::CUresult; + /** \brief Calculates either the minimal or recommended granularity + + Calculates either the minimal or recommended granularity + for a given allocation specification and returns it in granularity. This + granularity can be used as a multiple for alignment, size, or address mapping. + + \param[out] granularity Returned granularity. + \param[in] prop Property for which to determine the granularity for + \param[in] option Determines which granularity to return + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemCreate, ::cuMemMap*/ + fn cuMemGetAllocationGranularity( + granularity: *mut usize, + prop: *const cuda_types::CUmemAllocationProp, + option: cuda_types::CUmemAllocationGranularity_flags, + ) -> cuda_types::CUresult; + /** \brief Retrieve the contents of the property structure defining properties for this handle + + \param[out] prop - Pointer to a properties structure which will hold the information about this handle + \param[in] handle - Handle which to perform the query on + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemCreate, ::cuMemImportFromShareableHandle*/ + fn cuMemGetAllocationPropertiesFromHandle( + prop: *mut cuda_types::CUmemAllocationProp, + handle: cuda_types::CUmemGenericAllocationHandle, + ) -> cuda_types::CUresult; + /** \brief Given an address \p addr, returns the allocation handle of the backing memory allocation. + + The handle is guaranteed to be the same handle value used to map the memory. If the address + requested is not mapped, the function will fail. The returned handle must be released with + corresponding number of calls to ::cuMemRelease. + + \note The address \p addr, can be any address in a range previously mapped + by ::cuMemMap, and not necessarily the start address. + + \param[out] handle CUDA Memory handle for the backing memory allocation. + \param[in] addr Memory address to query, that has been mapped previously. 
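The export/import pair above is the basis for sharing virtual-memory allocations across processes. A sketch of both sides for the POSIX file descriptor handle type (Linux; the allocation is assumed to have been created with requestedHandleTypes including that type, the fd is transported by some external IPC channel such as a UNIX socket, and the helper names are illustrative):
\code
#include <cuda.h>
#include <stdint.h>

/* Exporting process: obtain a file descriptor that can be sent to another
   process. */
static CUresult export_fd(CUmemGenericAllocationHandle handle, int *fd_out)
{
    return cuMemExportToShareableHandle(fd_out, handle,
                                        CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR,
                                        0);
}

/* Importing process: recover a CUDA handle from the received descriptor.
   It still has to be mapped with cuMemMap and released with cuMemRelease. */
static CUresult import_fd(int fd, CUmemGenericAllocationHandle *handle_out)
{
    return cuMemImportFromShareableHandle(handle_out,
                                          (void *)(uintptr_t)fd,
                                          CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR);
}
\endcode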
+ \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap*/ + fn cuMemRetainAllocationHandle( + handle: *mut cuda_types::CUmemGenericAllocationHandle, + addr: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Frees memory with stream ordered semantics + + Inserts a free operation into \p hStream. + The allocation must not be accessed after stream execution reaches the free. + After this API returns, accessing the memory from any subsequent work launched on the GPU + or querying its pointer attributes results in undefined behavior. + + \note During stream capture, this function results in the creation of a free node and + must therefore be passed the address of a graph allocation. + + \param dptr - memory to free + \param hStream - The stream establishing the stream ordering contract. + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + ::CUDA_ERROR_NOT_SUPPORTED*/ + fn cuMemFreeAsync_ptsz( + dptr: cuda_types::CUdeviceptr, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Allocates memory with stream ordered semantics + + Inserts an allocation operation into \p hStream. + A pointer to the allocated memory is returned immediately in *dptr. + The allocation must not be accessed until the the allocation operation completes. + The allocation comes from the memory pool current to the stream's device. + + \note The default memory pool of a device contains device memory from that device. + \note Basic stream ordering allows future work submitted into the same stream to use the allocation. + Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + operation completes before work submitted in a separate stream runs. + \note During stream capture, this function results in the creation of an allocation node. In this case, + the allocation is owned by the graph instead of the memory pool. The memory pool's properties + are used to set the node's creation parameters. + + \param[out] dptr - Returned device pointer + \param[in] bytesize - Number of bytes to allocate + \param[in] hStream - The stream establishing the stream ordering contract and the memory pool to allocate from + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuMemAllocFromPoolAsync, ::cuMemFreeAsync, ::cuDeviceSetMemPool, + ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, + ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute*/ + fn cuMemAllocAsync_ptsz( + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Tries to release memory back to the OS + + Releases memory back to the OS until the pool contains fewer than minBytesToKeep + reserved bytes, or there is no more memory that the allocator can safely release. + The allocator cannot release OS allocations that back outstanding asynchronous allocations. + The OS allocations may happen at different granularity from the user allocations. + + \note: Allocations that have not been freed count as outstanding. 
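The two stream-ordered entry points above (public names cuMemAllocAsync and cuMemFreeAsync, declared here as the _ptsz variants) are typically used as a matched pair on one stream. A minimal sketch, helper name illustrative:
\code
#include <cuda.h>

/* Allocate from the device's current memory pool, run work that uses the
   buffer on the same stream, then free it in stream order. */
static CUresult stream_ordered_roundtrip(CUstream stream, size_t bytes)
{
    CUdeviceptr buf = 0;
    CUresult st = cuMemAllocAsync(&buf, bytes, stream);
    if (st != CUDA_SUCCESS) return st;

    /* ... enqueue kernels/copies that use `buf` on `stream` here ... */

    st = cuMemFreeAsync(buf, stream);      /* ordered after the work above */
    if (st != CUDA_SUCCESS) return st;
    return cuStreamSynchronize(stream);    /* ensure completion before reuse */
}
\endcode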
+ \note: Allocations that have been asynchronously freed but whose completion has + not been observed on the host (eg. by a synchronize) can count as outstanding. + + \param[in] pool - The memory pool to trim + \param[in] minBytesToKeep - If the pool has less than minBytesToKeep reserved, + the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have + at least minBytesToKeep bytes reserved after the operation. + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + ::cuDeviceGetMemPool, ::cuMemPoolCreate*/ + fn cuMemPoolTrimTo( + pool: cuda_types::CUmemoryPool, + minBytesToKeep: usize, + ) -> cuda_types::CUresult; + /** \brief Sets attributes of a memory pool + + Supported attributes are: + - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) + Amount of reserved memory in bytes to hold onto before trying + to release memory back to the OS. When more than the release + threshold bytes of memory are held by the memory pool, the + allocator will try to release memory back to the OS on the + next call to stream, event or context synchronize. (default 0) + - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) + Allow ::cuMemAllocAsync to use memory asynchronously freed + in another stream as long as a stream ordering dependency + of the allocating stream on the free action exists. + Cuda events and null stream interactions can create the required + stream ordered dependencies. (default enabled) + - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) + Allow reuse of already completed frees when there is no dependency + between the free and allocation. (default enabled) + - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) + Allow ::cuMemAllocAsync to insert new stream dependencies + in order to establish the stream ordering required to reuse + a piece of memory released by ::cuMemFreeAsync (default enabled). + - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) + Reset the high watermark that tracks the amount of backing memory that was + allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) + Reset the high watermark that tracks the amount of used memory that was + allocated for the memory pool. + + \param[in] pool - The memory pool to modify + \param[in] attr - The attribute to modify + \param[in] value - Pointer to the value to assign + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + ::cuDeviceGetMemPool, ::cuMemPoolCreate*/ + fn cuMemPoolSetAttribute( + pool: cuda_types::CUmemoryPool, + attr: cuda_types::CUmemPool_attribute, + value: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Gets attributes of a memory pool + + Supported attributes are: + - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) + Amount of reserved memory in bytes to hold onto before trying + to release memory back to the OS. When more than the release + threshold bytes of memory are held by the memory pool, the + allocator will try to release memory back to the OS on the + next call to stream, event or context synchronize. 
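The most common use of cuMemPoolSetAttribute is raising the release threshold so a device's default pool keeps memory cached across synchronization points instead of returning it to the OS. A sketch (device ordinal and threshold are arbitrary, helper name illustrative):
\code
#include <cuda.h>

/* Keep up to 256 MiB cached in device 0's default pool. */
static CUresult set_release_threshold(void)
{
    CUmemoryPool pool;
    CUresult st = cuDeviceGetDefaultMemPool(&pool, 0 /* device ordinal */);
    if (st != CUDA_SUCCESS) return st;

    cuuint64_t threshold = 256ull << 20;
    return cuMemPoolSetAttribute(pool, CU_MEMPOOL_ATTR_RELEASE_THRESHOLD,
                                 &threshold);
}
\endcode
cuMemPoolGetAttribute reads the same attributes back with the identical (pool, attr, value) calling convention.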
(default 0) + - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) + Allow ::cuMemAllocAsync to use memory asynchronously freed + in another stream as long as a stream ordering dependency + of the allocating stream on the free action exists. + Cuda events and null stream interactions can create the required + stream ordered dependencies. (default enabled) + - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) + Allow reuse of already completed frees when there is no dependency + between the free and allocation. (default enabled) + - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) + Allow ::cuMemAllocAsync to insert new stream dependencies + in order to establish the stream ordering required to reuse + a piece of memory released by ::cuMemFreeAsync (default enabled). + - ::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: (value type = cuuint64_t) + Amount of backing memory currently allocated for the mempool + - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) + High watermark of backing memory allocated for the mempool since the + last time it was reset. + - ::CU_MEMPOOL_ATTR_USED_MEM_CURRENT: (value type = cuuint64_t) + Amount of memory from the pool that is currently in use by the application. + - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) + High watermark of the amount of memory from the pool that was in use by the application. + + \param[in] pool - The memory pool to get attributes of + \param[in] attr - The attribute to get + \param[out] value - Retrieved value + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + ::cuDeviceGetMemPool, ::cuMemPoolCreate*/ + fn cuMemPoolGetAttribute( + pool: cuda_types::CUmemoryPool, + attr: cuda_types::CUmemPool_attribute, + value: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Controls visibility of pools between devices + + \param[in] pool - The pool being modified + \param[in] map - Array of access descriptors. Each descriptor instructs the access to enable for a single gpu. + \param[in] count - Number of descriptors in the map array. + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + ::cuDeviceGetMemPool, ::cuMemPoolCreate*/ + fn cuMemPoolSetAccess( + pool: cuda_types::CUmemoryPool, + map: *const cuda_types::CUmemAccessDesc, + count: usize, + ) -> cuda_types::CUresult; + /** \brief Returns the accessibility of a pool from a device + + Returns the accessibility of the pool's memory from the specified location. + + \param[out] flags - the accessibility of the pool from the specified location + \param[in] memPool - the pool being queried + \param[in] location - the location accessing the pool + + \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + ::cuDeviceGetMemPool, ::cuMemPoolCreate*/ + fn cuMemPoolGetAccess( + flags: *mut cuda_types::CUmemAccess_flags, + memPool: cuda_types::CUmemoryPool, + location: *mut cuda_types::CUmemLocation, + ) -> cuda_types::CUresult; + /** \brief Creates a memory pool + + Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines + the properties of the pool such as the backing device and IPC capabilities. 
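A sketch of cuMemPoolSetAccess and cuMemPoolGetAccess together: grant a peer device read/write access to a pool and read the setting back (helper name illustrative, peer-to-peer capability between the devices is assumed):
\code
#include <cuda.h>
#include <string.h>

/* Let device `peer` access allocations coming from `pool`, then verify. */
static CUresult enable_pool_peer_access(CUmemoryPool pool, int peer)
{
    CUmemAccessDesc desc;
    memset(&desc, 0, sizeof(desc));
    desc.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    desc.location.id = peer;
    desc.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    CUresult st = cuMemPoolSetAccess(pool, &desc, 1);
    if (st != CUDA_SUCCESS) return st;

    CUmemAccess_flags flags;
    CUmemLocation loc = desc.location;
    st = cuMemPoolGetAccess(&flags, pool, &loc);
    /* flags should now report CU_MEM_ACCESS_FLAGS_PROT_READWRITE */
    return st;
}
\endcode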
+ + To create a memory pool targeting a specific host NUMA node, applications must + set ::CUmemPoolProps::CUmemLocation::type to ::CU_MEM_LOCATION_TYPE_HOST_NUMA and + ::CUmemPoolProps::CUmemLocation::id must specify the NUMA ID of the host memory node. + By default, the pool's memory will be accessible from the device it is allocated on. + In the case of pools created with ::CU_MEM_LOCATION_TYPE_HOST_NUMA, their default accessibility + will be from the host CPU. + Applications can control the maximum size of the pool by specifying a non-zero value for ::CUmemPoolProps::maxSize. + If set to 0, the maximum size of the pool will default to a system dependent value. + + Applications can set ::CUmemPoolProps::handleTypes to ::CU_MEM_HANDLE_TYPE_FABRIC + in order to create ::CUmemoryPool suitable for sharing within an IMEX domain. + An IMEX domain is either an OS instance or a group of securely connected OS instances + using the NVIDIA IMEX daemon. An IMEX channel is a global resource within the IMEX domain + that represents a logical entity that aims to provide fine grained accessibility control + for the participating processes. When exporter and importer CUDA processes have been + granted access to the same IMEX channel, they can securely share memory. + If the allocating process does not have access setup for an IMEX channel, attempting to export + a ::CUmemoryPool with ::CU_MEM_HANDLE_TYPE_FABRIC will result in ::CUDA_ERROR_NOT_PERMITTED. + The nvidia-modprobe CLI provides more information regarding setting up of IMEX channels. + + \note Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC. + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_NOT_PERMITTED + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, ::cuDeviceGetDefaultMemPool, + ::cuMemAllocFromPoolAsync, ::cuMemPoolExportToShareableHandle*/ + fn cuMemPoolCreate( + pool: *mut cuda_types::CUmemoryPool, + poolProps: *const cuda_types::CUmemPoolProps, + ) -> cuda_types::CUresult; + /** \brief Destroys the specified memory pool + + If any pointers obtained from this pool haven't been freed or + the pool has free operations that haven't completed + when ::cuMemPoolDestroy is invoked, the function will return immediately and the + resources associated with the pool will be released automatically + once there are no more outstanding allocations. + + Destroying the current mempool of a device sets the default mempool of + that device as the current mempool for that device. + + \note A device's default memory pool cannot be destroyed. + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuMemFreeAsync, ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, + ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate*/ + fn cuMemPoolDestroy(pool: cuda_types::CUmemoryPool) -> cuda_types::CUresult; + /** \brief Allocates memory from a specified pool with stream ordered semantics. + + Inserts an allocation operation into \p hStream. + A pointer to the allocated memory is returned immediately in *dptr. + The allocation must not be accessed until the the allocation operation completes. + The allocation comes from the specified memory pool. + + \note + - The specified memory pool may be from a device different than that of the specified \p hStream. + + - Basic stream ordering allows future work submitted into the same stream to use the allocation. 
+ Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + operation completes before work submitted in a separate stream runs. + + \note During stream capture, this function results in the creation of an allocation node. In this case, + the allocation is owned by the graph instead of the memory pool. The memory pool's properties + are used to set the node's creation parameters. + + \param[out] dptr - Returned device pointer + \param[in] bytesize - Number of bytes to allocate + \param[in] pool - The pool to allocate from + \param[in] hStream - The stream establishing the stream ordering semantic + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolSetAccess, + ::cuMemPoolSetAttribute*/ + fn cuMemAllocFromPoolAsync_ptsz( + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + pool: cuda_types::CUmemoryPool, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Exports a memory pool to the requested handle type. + + Given an IPC capable mempool, create an OS handle to share the pool with another process. + A recipient process can convert the shareable handle into a mempool with ::cuMemPoolImportFromShareableHandle. + Individual pointers can then be shared with the ::cuMemPoolExportPointer and ::cuMemPoolImportPointer APIs. + The implementation of what the shareable handle is and how it can be transferred is defined by the requested + handle type. + + \note: To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE. + + \param[out] handle_out - Returned OS handle + \param[in] pool - pool to export + \param[in] handleType - the type of handle to create + \param[in] flags - must be 0 + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer, + ::cuMemPoolImportPointer, ::cuMemAllocAsync, ::cuMemFreeAsync, + ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, + ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute*/ + fn cuMemPoolExportToShareableHandle( + handle_out: *mut ::core::ffi::c_void, + pool: cuda_types::CUmemoryPool, + handleType: cuda_types::CUmemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief imports a memory pool from a shared handle. + + Specific allocations can be imported from the imported pool with cuMemPoolImportPointer. + + If \p handleType is ::CU_MEM_HANDLE_TYPE_FABRIC and the importer process has not been + granted access to the same IMEX channel as the exporter process, this API will error + as ::CUDA_ERROR_NOT_PERMITTED. + + + \note Imported memory pools do not support creating new allocations. + As such imported memory pools may not be used in cuDeviceSetMemPool + or ::cuMemAllocFromPoolAsync calls. 
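A sketch of creating an explicit pool and allocating from it with cuMemAllocFromPoolAsync (public name of the _ptsz entry point above). The helper name is illustrative; a non-IPC pool on device 0 is assumed:
\code
#include <cuda.h>
#include <string.h>

/* Create a non-IPC pool on device 0 and take one stream-ordered allocation
   from it. The pool outlives the allocation and is destroyed separately. */
static CUresult alloc_from_explicit_pool(CUstream stream, size_t bytes,
                                         CUdeviceptr *out,
                                         CUmemoryPool *pool_out)
{
    CUmemPoolProps props;
    memset(&props, 0, sizeof(props));
    props.allocType = CU_MEM_ALLOCATION_TYPE_PINNED;
    props.handleTypes = CU_MEM_HANDLE_TYPE_NONE;   /* no IPC for this pool */
    props.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    props.location.id = 0;

    CUresult st = cuMemPoolCreate(pool_out, &props);
    if (st != CUDA_SUCCESS) return st;
    return cuMemAllocFromPoolAsync(out, bytes, *pool_out, stream);
}
\endcode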
+ + \param[out] pool_out - Returned memory pool + \param[in] handle - OS handle of the pool to open + \param[in] handleType - The type of handle being imported + \param[in] flags - must be 0 + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolExportPointer, ::cuMemPoolImportPointer*/ + fn cuMemPoolImportFromShareableHandle( + pool_out: *mut cuda_types::CUmemoryPool, + handle: *mut ::core::ffi::c_void, + handleType: cuda_types::CUmemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Export data to share a memory pool allocation between processes. + + Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool. + The recipient process can import the allocation with the ::cuMemPoolImportPointer api. + The data is not a handle and may be shared through any IPC mechanism. + + \param[out] shareData_out - Returned export data + \param[in] ptr - pointer to memory being exported + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolImportPointer*/ + fn cuMemPoolExportPointer( + shareData_out: *mut cuda_types::CUmemPoolPtrExportData, + ptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Import a memory pool allocation from another process. + + Returns in \p ptr_out a pointer to the imported memory. + The imported memory must not be accessed before the allocation operation completes + in the exporting process. The imported memory must be freed from all importing processes before + being freed in the exporting process. The pointer may be freed with cuMemFree + or cuMemFreeAsync. If cuMemFreeAsync is used, the free must be completed + on the importing process before the free operation on the exporting process. + + \note The cuMemFreeAsync api may be used in the exporting process before + the cuMemFreeAsync operation completes in its stream as long as the + cuMemFreeAsync in the exporting process specifies a stream with + a stream dependency on the importing process's cuMemFreeAsync. + + \param[out] ptr_out - pointer to imported memory + \param[in] pool - pool from which to import + \param[in] shareData - data specifying the memory to import + + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer*/ + fn cuMemPoolImportPointer( + ptr_out: *mut cuda_types::CUdeviceptr, + pool: cuda_types::CUmemoryPool, + shareData: *mut cuda_types::CUmemPoolPtrExportData, + ) -> cuda_types::CUresult; + /** \brief Create a generic allocation handle representing a multicast object described by the given properties. + + This creates a multicast object as described by \p prop. The number of + participating devices is specified by ::CUmulticastObjectProp::numDevices. + Devices can be added to the multicast object via ::cuMulticastAddDevice. + All participating devices must be added to the multicast object before memory + can be bound to it. Memory is bound to the multicast object via either + ::cuMulticastBindMem or ::cuMulticastBindAddr, and can be unbound via + ::cuMulticastUnbind. 
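The per-pointer export/import pair above completes the pool IPC story: after the pool itself has been shared via cuMemPoolExportToShareableHandle and cuMemPoolImportFromShareableHandle, individual allocations travel as small export blobs. A sketch of both sides (helper names illustrative; the blob is moved between processes by any IPC mechanism):
\code
#include <cuda.h>

/* Exporting process: describe one allocation from an IPC-capable pool. */
static CUresult export_pool_pointer(CUdeviceptr ptr,
                                    CUmemPoolPtrExportData *blob)
{
    return cuMemPoolExportPointer(blob, ptr);
}

/* Importing process: turn the received blob back into a device pointer,
   using the already-imported pool. */
static CUresult import_pool_pointer(CUmemoryPool importedPool,
                                    CUmemPoolPtrExportData *blob,
                                    CUdeviceptr *ptr_out)
{
    return cuMemPoolImportPointer(ptr_out, importedPool, blob);
}
\endcode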
The total amount of memory that can be bound per device + is specified by :CUmulticastObjectProp::size. This size must be a multiple of + the value returned by ::cuMulticastGetGranularity with the flag + ::CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, the size + should be aligned to the value returned by ::cuMulticastGetGranularity with + the flag ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + + After all participating devices have been added, multicast objects can also + be mapped to a device's virtual address space using the virtual memory + management APIs (see ::cuMemMap and ::cuMemSetAccess). Multicast objects can + also be shared with other processes by requesting a shareable handle via + ::cuMemExportToShareableHandle. Note that the desired types of shareable + handles must be specified in the bitmask ::CUmulticastObjectProp::handleTypes. + Multicast objects can be released using the virtual memory management API + ::cuMemRelease. + + \param[out] mcHandle Value of handle returned. + \param[in] prop Properties of the multicast object to create. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMulticastAddDevice, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind + \sa ::cuMemCreate, ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle*/ + fn cuMulticastCreate( + mcHandle: *mut cuda_types::CUmemGenericAllocationHandle, + prop: *const cuda_types::CUmulticastObjectProp, + ) -> cuda_types::CUresult; + /** \brief Associate a device to a multicast object. + + Associates a device to a multicast object. The added device will be a part of + the multicast team of size specified by CUmulticastObjectProp::numDevices + during ::cuMulticastCreate. + The association of the device to the multicast object is permanent during + the life time of the multicast object. + All devices must be added to the multicast team before any memory can be + bound to any device in the team. Any calls to ::cuMulticastBindMem or + ::cuMulticastBindAddr will block until all devices have been added. + Similarly all devices must be added to the multicast team before a virtual + address range can be mapped to the multicast object. A call to ::cuMemMap + will block until all devices have been added. + + \param[in] mcHandle Handle representing a multicast object. + \param[in] dev Device that will be associated to the multicast + object. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr*/ + fn cuMulticastAddDevice( + mcHandle: cuda_types::CUmemGenericAllocationHandle, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Bind a memory allocation represented by a handle to a multicast object. + + Binds a memory allocation specified by \p memHandle and created via + ::cuMemCreate to a multicast object represented by \p mcHandle and created + via ::cuMulticastCreate. 
The intended \p size of the bind, the offset in the + multicast range \p mcOffset as well as the offset in the memory \p memOffset + must be a multiple of the value returned by ::cuMulticastGetGranularity with + the flag ::CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, + \p size, \p mcOffset and \p memOffset should be aligned to the granularity of + the memory allocation(see ::cuMemGetAllocationGranularity) or to the value + returned by ::cuMulticastGetGranularity with the flag + ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + + The \p size + \p memOffset must be smaller than the size of the allocated + memory. Similarly the \p size + \p mcOffset must be smaller than the size + of the multicast object. + The memory allocation must have beeen created on one of the devices + that was added to the multicast team via ::cuMulticastAddDevice. + Externally shareable as well as imported multicast objects can be bound only + to externally shareable memory. + Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are + insufficient resources required to perform the bind. This call may also + return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not + initialized or running. + + \param[in] mcHandle Handle representing a multicast object. + \param[in] mcOffset Offset into the multicast object for attachment. + \param[in] memHandle Handle representing a memory allocation. + \param[in] memOffset Offset into the memory for attachment. + \param[in] size Size of the memory that will be bound to the + multicast object. + \param[in] flags Flags for future use, must be zero for now. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_SYSTEM_NOT_READY + + \sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate*/ + fn cuMulticastBindMem( + mcHandle: cuda_types::CUmemGenericAllocationHandle, + mcOffset: usize, + memHandle: cuda_types::CUmemGenericAllocationHandle, + memOffset: usize, + size: usize, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Bind a memory allocation represented by a virtual address to a multicast object. + + Binds a memory allocation specified by its mapped address \p memptr to a + multicast object represented by \p mcHandle. + The memory must have been allocated via ::cuMemCreate or ::cudaMallocAsync. + The intended \p size of the bind, the offset in the multicast range + \p mcOffset and \p memptr must be a multiple of the value returned by + ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_GRANULARITY_MINIMUM. + For best performance however, \p size, \p mcOffset and \p memptr should be + aligned to the value returned by ::cuMulticastGetGranularity with the flag + ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + + The \p size must be smaller than the size of the allocated memory. + Similarly the \p size + \p mcOffset must be smaller than the total size + of the multicast object. + The memory allocation must have beeen created on one of the devices + that was added to the multicast team via ::cuMulticastAddDevice. + Externally shareable as well as imported multicast objects can be bound only + to externally shareable memory. + Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are + insufficient resources required to perform the bind. 
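A sketch of the multicast setup sequence described above: create the object, add every participating device, then bind an existing physical allocation. Two devices are assumed, the size is assumed to be already aligned to the multicast granularity, and the helper name is illustrative:
\code
#include <cuda.h>
#include <string.h>

/* Build a two-device multicast object of `size` bytes and bind an existing
   allocation (created with cuMemCreate on one of the devices) to it. */
static CUresult setup_multicast(CUdevice devA, CUdevice devB,
                                CUmemGenericAllocationHandle memHandle,
                                size_t size,
                                CUmemGenericAllocationHandle *mc_out)
{
    CUmulticastObjectProp prop;
    memset(&prop, 0, sizeof(prop));
    prop.numDevices = 2;
    prop.size = size;
    prop.handleTypes = CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR;

    CUresult st = cuMulticastCreate(mc_out, &prop);
    if (st != CUDA_SUCCESS) return st;

    /* All participating devices must join before any bind can complete. */
    if ((st = cuMulticastAddDevice(*mc_out, devA)) != CUDA_SUCCESS) return st;
    if ((st = cuMulticastAddDevice(*mc_out, devB)) != CUDA_SUCCESS) return st;

    return cuMulticastBindMem(*mc_out, 0 /*mcOffset*/, memHandle,
                              0 /*memOffset*/, size, 0);
}
\endcode
The multicast handle can then be mapped into each device's address space with cuMemMap/cuMemSetAccess, exactly like an ordinary allocation handle.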
This call may also + return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not + initialized or running. + + \param[in] mcHandle Handle representing a multicast object. + \param[in] mcOffset Offset into multicast va range for attachment. + \param[in] memptr Virtual address of the memory allocation. + \param[in] size Size of memory that will be bound to the + multicast object. + \param[in] flags Flags for future use, must be zero now. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_OUT_OF_MEMORY, + ::CUDA_ERROR_SYSTEM_NOT_READY + + \sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate*/ + fn cuMulticastBindAddr( + mcHandle: cuda_types::CUmemGenericAllocationHandle, + mcOffset: usize, + memptr: cuda_types::CUdeviceptr, + size: usize, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Unbind any memory allocations bound to a multicast object at a given offset and upto a given size. + + Unbinds any memory allocations hosted on \p dev and bound to a multicast + object at \p mcOffset and upto a given \p size. + The intended \p size of the unbind and the offset in the multicast range + ( \p mcOffset ) must be a multiple of the value returned by + ::cuMulticastGetGranularity flag ::CU_MULTICAST_GRANULARITY_MINIMUM. + The \p size + \p mcOffset must be smaller than the total size of the + multicast object. + + \note + Warning: + The \p mcOffset and the \p size must match the corresponding values specified + during the bind call. Any other values may result in undefined behavior. + + \param[in] mcHandle Handle representing a multicast object. + \param[in] dev Device that hosts the memory allocation. + \param[in] mcOffset Offset into the multicast object. + \param[in] size Desired size to unbind. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMulticastBindMem, ::cuMulticastBindAddr*/ + fn cuMulticastUnbind( + mcHandle: cuda_types::CUmemGenericAllocationHandle, + dev: cuda_types::CUdevice, + mcOffset: usize, + size: usize, + ) -> cuda_types::CUresult; + /** \brief Calculates either the minimal or recommended granularity for multicast object + + Calculates either the minimal or recommended granularity for a given set of + multicast object properties and returns it in granularity. This granularity + can be used as a multiple for size, bind offsets and address mappings of the + multicast object. + + \param[out] granularity Returned granularity. + \param[in] prop Properties of the multicast object. + \param[in] option Determines which granularity to return. 
+ + \returns + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind*/ + fn cuMulticastGetGranularity( + granularity: *mut usize, + prop: *const cuda_types::CUmulticastObjectProp, + option: cuda_types::CUmulticastGranularity_flags, + ) -> cuda_types::CUresult; + /** \brief Returns information about a pointer + + The supported attributes are: + + - ::CU_POINTER_ATTRIBUTE_CONTEXT: + + Returns in \p *data the ::CUcontext in which \p ptr was allocated or + registered. + The type of \p data must be ::CUcontext *. + + If \p ptr was not allocated by, mapped by, or registered with + a ::CUcontext which uses unified virtual addressing then + ::CUDA_ERROR_INVALID_VALUE is returned. + + - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE: + + Returns in \p *data the physical memory type of the memory that + \p ptr addresses as a ::CUmemorytype enumerated value. + The type of \p data must be unsigned int. + + If \p ptr addresses device memory then \p *data is set to + ::CU_MEMORYTYPE_DEVICE. The particular ::CUdevice on which the + memory resides is the ::CUdevice of the ::CUcontext returned by the + ::CU_POINTER_ATTRIBUTE_CONTEXT attribute of \p ptr. + + If \p ptr addresses host memory then \p *data is set to + ::CU_MEMORYTYPE_HOST. + + If \p ptr was not allocated by, mapped by, or registered with + a ::CUcontext which uses unified virtual addressing then + ::CUDA_ERROR_INVALID_VALUE is returned. + + If the current ::CUcontext does not support unified virtual + addressing then ::CUDA_ERROR_INVALID_CONTEXT is returned. + + - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER: + + Returns in \p *data the device pointer value through which + \p ptr may be accessed by kernels running in the current + ::CUcontext. + The type of \p data must be CUdeviceptr *. + + If there exists no device pointer value through which + kernels running in the current ::CUcontext may access + \p ptr then ::CUDA_ERROR_INVALID_VALUE is returned. + + If there is no current ::CUcontext then + ::CUDA_ERROR_INVALID_CONTEXT is returned. + + Except in the exceptional disjoint addressing cases discussed + below, the value returned in \p *data will equal the input + value \p ptr. + + - ::CU_POINTER_ATTRIBUTE_HOST_POINTER: + + Returns in \p *data the host pointer value through which + \p ptr may be accessed by by the host program. + The type of \p data must be void **. + If there exists no host pointer value through which + the host program may directly access \p ptr then + ::CUDA_ERROR_INVALID_VALUE is returned. + + Except in the exceptional disjoint addressing cases discussed + below, the value returned in \p *data will equal the input + value \p ptr. + + - ::CU_POINTER_ATTRIBUTE_P2P_TOKENS: + + Returns in \p *data two tokens for use with the nv-p2p.h Linux + kernel interface. \p data must be a struct of type + CUDA_POINTER_ATTRIBUTE_P2P_TOKENS. + + \p ptr must be a pointer to memory obtained from :cuMemAlloc(). + Note that p2pToken and vaSpaceToken are only valid for the + lifetime of the source allocation. A subsequent allocation at + the same address may return completely different tokens. + Querying this attribute has a side effect of setting the attribute + ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that + \p ptr points to. 
+ + - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: + + A boolean attribute which when set, ensures that synchronous memory operations + initiated on the region of memory that \p ptr points to will always synchronize. + See further documentation in the section titled "API synchronization behavior" + to learn more about cases when synchronous memory operations can + exhibit asynchronous behavior. + + - ::CU_POINTER_ATTRIBUTE_BUFFER_ID: + + Returns in \p *data a buffer ID which is guaranteed to be unique within the process. + \p data must point to an unsigned long long. + + \p ptr must be a pointer to memory obtained from a CUDA memory allocation API. + Every memory allocation from any of the CUDA memory allocation APIs will + have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs + from previous freed allocations. IDs are only unique within a single process. + + + - ::CU_POINTER_ATTRIBUTE_IS_MANAGED: + + Returns in \p *data a boolean that indicates whether the pointer points to + managed memory or not. + + If \p ptr is not a valid CUDA pointer then ::CUDA_ERROR_INVALID_VALUE is returned. + + - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: + + Returns in \p *data an integer representing a device ordinal of a device against + which the memory was allocated or registered. + + - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: + + Returns in \p *data a boolean that indicates if this pointer maps to + an allocation that is suitable for ::cudaIpcGetMemHandle. + + - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: + + Returns in \p *data the starting address for the allocation referenced + by the device pointer \p ptr. Note that this is not necessarily the + address of the mapped region, but the address of the mappable address + range \p ptr references (e.g. from ::cuMemAddressReserve). + + - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE: + + Returns in \p *data the size for the allocation referenced by the device + pointer \p ptr. Note that this is not necessarily the size of the mapped + region, but the size of the mappable address range \p ptr references + (e.g. from ::cuMemAddressReserve). To retrieve the size of the mapped + region, see ::cuMemGetAddressRange + + - ::CU_POINTER_ATTRIBUTE_MAPPED: + + Returns in \p *data a boolean that indicates if this pointer is in a + valid address range that is mapped to a backing allocation. + + - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: + + Returns a bitmask of the allowed handle types for an allocation that may + be passed to ::cuMemExportToShareableHandle. + + - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: + + Returns in \p *data the handle to the mempool that the allocation was obtained from. + + \par + + Note that for most allocations in the unified virtual address space + the host and device pointer for accessing the allocation will be the + same. The exceptions to this are + - user memory registered using ::cuMemHostRegister + - host memory allocated using ::cuMemHostAlloc with the + ::CU_MEMHOSTALLOC_WRITECOMBINED flag + For these types of allocation there will exist separate, disjoint host + and device addresses for accessing the allocation. In particular + - The host address will correspond to an invalid unmapped device address + (which will result in an exception if accessed from the device) + - The device address will correspond to an invalid unmapped host address + (which will result in an exception if accessed from the host). 
+ For these types of allocations, querying ::CU_POINTER_ATTRIBUTE_HOST_POINTER + and ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host + and device addresses from either address. + + \param data - Returned pointer attribute value + \param attribute - Pointer attribute to query + \param ptr - Pointer + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuPointerSetAttribute, + ::cuMemAlloc, + ::cuMemFree, + ::cuMemAllocHost, + ::cuMemFreeHost, + ::cuMemHostAlloc, + ::cuMemHostRegister, + ::cuMemHostUnregister, + ::cudaPointerGetAttributes*/ + fn cuPointerGetAttribute( + data: *mut ::core::ffi::c_void, + attribute: cuda_types::CUpointer_attribute, + ptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Prefetches memory to the specified destination device + + Note there is a later version of this API, ::cuMemPrefetchAsync_v2. It will + supplant this version in 13.0, which is retained for minor version compatibility. + + Prefetches memory to the specified destination device. \p devPtr is the + base device pointer of the memory to be prefetched and \p dstDevice is the + destination device. \p count specifies the number of bytes to copy. \p hStream + is the stream in which the operation is enqueued. The memory range must refer + to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables. + + Passing in CU_DEVICE_CPU for \p dstDevice will prefetch the data to host memory. If + \p dstDevice is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + must be non-zero. Additionally, \p hStream must be associated with a device that has a + non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + + The start address and end address of the memory range will be rounded down and rounded up + respectively to be aligned to CPU page size before the prefetch operation is enqueued + in the stream. + + If no physical memory has been allocated for this region, then this memory region + will be populated and mapped on the destination device. If there's insufficient + memory to prefetch the desired region, the Unified Memory driver may evict pages from other + ::cuMemAllocManaged allocations to host memory in order to make room. Device memory + allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted. + + By default, any mappings to the previous location of the migrated pages are removed and + mappings for the new location are only setup on \p dstDevice. The exact behavior however + also depends on the settings applied to this memory range via ::cuMemAdvise as described + below: + + If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, + then that subset will create a read-only copy of the pages on \p dstDevice. + + If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory + range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the + preferred location of any pages in the memory range. + + If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, + then mappings to those pages from all the appropriate processors are updated to + refer to the new location if establishing such a mapping is possible. Otherwise, + those mappings are cleared. 
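A small sketch of cuPointerGetAttribute in practice, querying the memory type and the mappable range a pointer belongs to (helper name illustrative; attribute data types follow the descriptions above):
\code
#include <cuda.h>

/* Query where `ptr` lives and which mappable VA range it belongs to. */
static CUresult inspect_pointer(CUdeviceptr ptr)
{
    unsigned int memType = 0;
    CUresult st = cuPointerGetAttribute(&memType,
                                        CU_POINTER_ATTRIBUTE_MEMORY_TYPE, ptr);
    if (st != CUDA_SUCCESS) return st;   /* CU_MEMORYTYPE_DEVICE or _HOST */

    CUdeviceptr rangeStart = 0;
    size_t rangeSize = 0;
    st = cuPointerGetAttribute(&rangeStart,
                               CU_POINTER_ATTRIBUTE_RANGE_START_ADDR, ptr);
    if (st != CUDA_SUCCESS) return st;
    return cuPointerGetAttribute(&rangeSize,
                                 CU_POINTER_ATTRIBUTE_RANGE_SIZE, ptr);
}
\endcode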
+ + Note that this API is not required for functionality and only serves to improve performance + by allowing the application to migrate data to a suitable location before it is accessed. + Memory accesses to this range are always coherent and are allowed even when the data is + actively being migrated. + + Note that this function is asynchronous with respect to the host and all work + on other devices. + + \param devPtr - Pointer to be prefetched + \param count - Size in bytes + \param dstDevice - Destination device to prefetch to + \param hStream - Stream to enqueue prefetch operation + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + ::cuMemcpy3DPeerAsync, ::cuMemAdvise, ::cuMemPrefetchAsync, + ::cudaMemPrefetchAsync_v2*/ + fn cuMemPrefetchAsync_ptsz( + devPtr: cuda_types::CUdeviceptr, + count: usize, + dstDevice: cuda_types::CUdevice, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Prefetches memory to the specified destination location + + Prefetches memory to the specified destination location. \p devPtr is the + base device pointer of the memory to be prefetched and \p location specifies the + destination location. \p count specifies the number of bytes to copy. \p hStream + is the stream in which the operation is enqueued. The memory range must refer + to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables. + + Specifying ::CU_MEM_LOCATION_TYPE_DEVICE for ::CUmemLocation::type will prefetch memory to GPU + specified by device ordinal ::CUmemLocation::id which must have non-zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Additionally, \p hStream must be associated with a device + that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + Specifying ::CU_MEM_LOCATION_TYPE_HOST as ::CUmemLocation::type will prefetch data to host memory. + Applications can request prefetching memory to a specific host NUMA node by specifying + ::CU_MEM_LOCATION_TYPE_HOST_NUMA for ::CUmemLocation::type and a valid host NUMA node id in ::CUmemLocation::id. + Users can also request prefetching memory to the host NUMA node closest to the current thread's CPU by specifying + ::CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT for ::CUmemLocation::type. Note that when ::CUmemLocation::type is either + ::CU_MEM_LOCATION_TYPE_HOST or ::CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT, ::CUmemLocation::id will be ignored. + + The start address and end address of the memory range will be rounded down and rounded up + respectively to be aligned to CPU page size before the prefetch operation is enqueued + in the stream. + + If no physical memory has been allocated for this region, then this memory region + will be populated and mapped on the destination device. If there's insufficient + memory to prefetch the desired region, the Unified Memory driver may evict pages from other + ::cuMemAllocManaged allocations to host memory in order to make room. Device memory + allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted. + + By default, any mappings to the previous location of the migrated pages are removed and + mappings for the new location are only setup on the destination location.
The exact behavior however + also depends on the settings applied to this memory range via ::cuMemAdvise as described + below: + + If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, + then that subset will create a read-only copy of the pages on destination location. + If however the destination location is a host NUMA node, then any pages of that subset + that are already in another host NUMA node will be transferred to the destination. + + If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory + range, then the pages will be migrated to \p location even if \p location is not the + preferred location of any pages in the memory range. + + If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, + then mappings to those pages from all the appropriate processors are updated to + refer to the new location if establishing such a mapping is possible. Otherwise, + those mappings are cleared. + + Note that this API is not required for functionality and only serves to improve performance + by allowing the application to migrate data to a suitable location before it is accessed. + Memory accesses to this range are always coherent and are allowed even when the data is + actively being migrated. + + Note that this function is asynchronous with respect to the host and all work + on other devices. + + \param devPtr - Pointer to be prefetched + \param count - Size in bytes + \param dstDevice - Destination device to prefetch to + \param flags - flags for future use, must be zero now. + \param hStream - Stream to enqueue prefetch operation + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + ::cuMemcpy3DPeerAsync, ::cuMemAdvise, ::cuMemPrefetchAsync + ::cudaMemPrefetchAsync_v2*/ + fn cuMemPrefetchAsync_v2_ptsz( + devPtr: cuda_types::CUdeviceptr, + count: usize, + location: cuda_types::CUmemLocation, + flags: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Advise about the usage of a given memory range + + Note there is a later version of this API, ::cuMemAdvise_v2. It will + supplant this version in 13.0, which is retained for minor version compatibility. + + Advise the Unified Memory subsystem about the usage pattern for the memory range + starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory + range will be rounded down and rounded up respectively to be aligned to CPU page size before the + advice is applied. The memory range must refer to managed memory allocated via ::cuMemAllocManaged + or declared via __managed__ variables. The memory range could also refer to system-allocated pageable + memory provided it represents a valid, host-accessible region of memory and all additional constraints + imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable + memory range results in an error being returned. + + The \p advice parameter can take the following values: + - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read + from and only occasionally written to. Any read accesses from any processor to this region will create a + read-only copy of at least the accessed pages in that processor's memory. 
Additionally, if ::cuMemPrefetchAsync + is called on this region, it will create a read-only copy of the data on the destination processor. + If any processor writes to this region, all copies of the corresponding page will be invalidated + except for the one where the write occurred. The \p device argument is ignored for this advice. + Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU + that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + Also, if a context is created on a device that does not have the device attribute + ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until + all such contexts are destroyed. + If the memory region refers to valid system-allocated pageable memory, then the accessing device must + have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only + copy to be created on that device. Note however that if the accessing device also has a non-zero value for the + device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice + will not create a read-only copy when that device accesses this memory region. + + - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the + Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated + copies of the data will be collapsed into a single copy. The location for the collapsed + copy will be the preferred location if the page has a preferred location and one of the read-duplicated + copies was resident at that location. Otherwise, the location chosen is arbitrary. + + - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the + data to be the memory belonging to \p device. Passing in CU_DEVICE_CPU for \p device sets the + preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the + device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location + does not cause data to migrate to that location immediately. Instead, it guides the migration policy + when a fault occurs on that memory region. If the data is already in its preferred location and the + faulting processor can establish a mapping without requiring the data to be migrated, then + data migration will be avoided. On the other hand, if the data is not in its preferred location + or if a direct mapping cannot be established, then it will be migrated to the processor accessing + it. It is important to note that setting the preferred location does not prevent data prefetching + done using ::cuMemPrefetchAsync. + Having a preferred location can override the page thrash detection and resolution logic in the Unified + Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device + memory, the page may eventually be pinned to host memory by the Unified Memory driver. But + if the preferred location is set as device memory, then the page will continue to thrash indefinitely. 
+ If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + policies associated with that advice will override the policies of this advice, unless read accesses from + \p device will not result in a read-only copy being created on that device as outlined in description for + the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY. + If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + + - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION + and changes the preferred location to none. + + - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by \p device. + Passing in ::CU_DEVICE_CPU for \p device will set the advice for the CPU. If \p device is a GPU, then + the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. + This advice does not cause data migration and has no impact on the location of the data per se. Instead, + it causes the data to always be mapped in the specified processor's page tables, as long as the + location of the data permits a mapping to be established. If the data gets migrated for any reason, + the mappings are updated accordingly. + This advice is recommended in scenarios where data locality is not important, but avoiding faults is. + Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the + data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data + over to the other GPUs is not as important because the accesses are infrequent and the overhead of + migration may be too high. But preventing faults can still help improve performance, and so having + a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated + to host memory because the CPU typically cannot access device memory directly. Any GPU that had the + ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the + page in host memory. + If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + policies associated with that advice will override the policies of this advice. Additionally, if the + preferred location of this memory region or any subset of it is also \p device, then the policies + associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. + If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + then this call has no effect. + + - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to + the data from \p device may be removed at any time causing accesses to result in non-fatal page faults. + If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + then this call has no effect. 
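To make the advice values above concrete, a minimal sketch (assumed usage, not from the original header; buf, size and dev are placeholders for a managed allocation and a valid CUdevice) might be:

 \code
 cuMemAdvise(buf, size, CU_MEM_ADVISE_SET_READ_MOSTLY, dev);                  // read-duplicate pages on access
 cuMemAdvise(buf, size, CU_MEM_ADVISE_SET_PREFERRED_LOCATION, CU_DEVICE_CPU); // keep the home copy on the host
 cuMemAdvise(buf, size, CU_MEM_ADVISE_SET_ACCESSED_BY, dev);                  // keep dev's mappings up to date
 \endcode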
+ + \param devPtr - Pointer to memory to set the advice for + \param count - Size in bytes of the memory range + \param advice - Advice to be applied for the specified memory range + \param device - Device to apply the advice for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, ::cuMemAdvise_v2 + ::cudaMemAdvise*/ + fn cuMemAdvise( + devPtr: cuda_types::CUdeviceptr, + count: usize, + advice: cuda_types::CUmem_advise, + device: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Advise about the usage of a given memory range + + Advise the Unified Memory subsystem about the usage pattern for the memory range + starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory + range will be rounded down and rounded up respectively to be aligned to CPU page size before the + advice is applied. The memory range must refer to managed memory allocated via ::cuMemAllocManaged + or declared via __managed__ variables. The memory range could also refer to system-allocated pageable + memory provided it represents a valid, host-accessible region of memory and all additional constraints + imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable + memory range results in an error being returned. + + The \p advice parameter can take the following values: + - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read + from and only occasionally written to. Any read accesses from any processor to this region will create a + read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cuMemPrefetchAsync + or ::cuMemPrefetchAsync_v2 is called on this region, it will create a read-only copy of the data on the destination processor. + If the target location for ::cuMemPrefetchAsync_v2 is a host NUMA node and a read-only copy already exists on + another host NUMA node, that copy will be migrated to the targeted host NUMA node. + If any processor writes to this region, all copies of the corresponding page will be invalidated + except for the one where the write occurred. If the writing processor is the CPU and the preferred location of + the page is a host NUMA node, then the page will also be migrated to that host NUMA node. The \p location argument is ignored for this advice. + Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU + that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + Also, if a context is created on a device that does not have the device attribute + ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until + all such contexts are destroyed. + If the memory region refers to valid system-allocated pageable memory, then the accessing device must + have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only + copy to be created on that device. Note however that if the accessing device also has a non-zero value for the + device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice + will not create a read-only copy when that device accesses this memory region. 
+ + - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the + Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated + copies of the data will be collapsed into a single copy. The location for the collapsed + copy will be the preferred location if the page has a preferred location and one of the read-duplicated + copies was resident at that location. Otherwise, the location chosen is arbitrary. + Note: The \p location argument is ignored for this advice. + + - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the + data to be the memory belonging to \p location. When ::CUmemLocation::type is ::CU_MEM_LOCATION_TYPE_HOST, + ::CUmemLocation::id is ignored and the preferred location is set to be host memory. To set the preferred location + to a specific host NUMA node, applications must set ::CUmemLocation::type to ::CU_MEM_LOCATION_TYPE_HOST_NUMA and + ::CUmemLocation::id must specify the NUMA ID of the host NUMA node. If ::CUmemLocation::type is set to ::CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT, + ::CUmemLocation::id will be ignored and the host NUMA node closest to the calling thread's CPU will be used as the preferred location. + If ::CUmemLocation::type is ::CU_MEM_LOCATION_TYPE_DEVICE, then ::CUmemLocation::id must be a valid device ordinal + and the device must have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + Setting the preferred location does not cause data to migrate to that location immediately. Instead, it guides the migration policy + when a fault occurs on that memory region. If the data is already in its preferred location and the + faulting processor can establish a mapping without requiring the data to be migrated, then + data migration will be avoided. On the other hand, if the data is not in its preferred location + or if a direct mapping cannot be established, then it will be migrated to the processor accessing + it. It is important to note that setting the preferred location does not prevent data prefetching + done using ::cuMemPrefetchAsync. + Having a preferred location can override the page thrash detection and resolution logic in the Unified + Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device + memory, the page may eventually be pinned to host memory by the Unified Memory driver. But + if the preferred location is set as device memory, then the page will continue to thrash indefinitely. + If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + policies associated with that advice will override the policies of this advice, unless read accesses from + \p location will not result in a read-only copy being created on that processor as outlined in the description for + the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY. + If the memory region refers to valid system-allocated pageable memory, and ::CUmemLocation::type is ::CU_MEM_LOCATION_TYPE_DEVICE + then ::CUmemLocation::id must be a valid device that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + + - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION + and changes the preferred location to none. The \p location argument is ignored for this advice.
+ + - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by processor \p location. + The ::CUmemLocation::type must be either ::CU_MEM_LOCATION_TYPE_DEVICE with ::CUmemLocation::id representing a valid device + ordinal or ::CU_MEM_LOCATION_TYPE_HOST and ::CUmemLocation::id will be ignored. All other location types are invalid. + If ::CUmemLocation::id is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. + This advice does not cause data migration and has no impact on the location of the data per se. Instead, + it causes the data to always be mapped in the specified processor's page tables, as long as the + location of the data permits a mapping to be established. If the data gets migrated for any reason, + the mappings are updated accordingly. + This advice is recommended in scenarios where data locality is not important, but avoiding faults is. + Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the + data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data + over to the other GPUs is not as important because the accesses are infrequent and the overhead of + migration may be too high. But preventing faults can still help improve performance, and so having + a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated + to host memory because the CPU typically cannot access device memory directly. Any GPU that had the + ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the + page in host memory. + If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + policies associated with that advice will override the policies of this advice. Additionally, if the + preferred location of this memory region or any subset of it is also \p location, then the policies + associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. + If the memory region refers to valid system-allocated pageable memory, and ::CUmemLocation::type is ::CU_MEM_LOCATION_TYPE_DEVICE + then device in ::CUmemLocation::id must have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + Additionally, if ::CUmemLocation::id has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + then this call has no effect. + + - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to + the data from \p location may be removed at any time causing accesses to result in non-fatal page faults. + If the memory region refers to valid system-allocated pageable memory, and ::CUmemLocation::type is ::CU_MEM_LOCATION_TYPE_DEVICE + then device in ::CUmemLocation::id must have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + Additionally, if ::CUmemLocation::id has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + then this call has no effect. 
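As a hedged sketch of the ::CUmemLocation-based form (illustrative only; buf and size are placeholders for a managed allocation), the same advice can target a host NUMA node or a device ordinal:

 \code
 CUmemLocation loc;
 loc.type = CU_MEM_LOCATION_TYPE_HOST_NUMA;   // prefer a specific host NUMA node
 loc.id = 0;                                  // example NUMA node id
 cuMemAdvise_v2(buf, size, CU_MEM_ADVISE_SET_PREFERRED_LOCATION, loc);

 loc.type = CU_MEM_LOCATION_TYPE_DEVICE;      // keep mappings for device ordinal 0
 loc.id = 0;
 cuMemAdvise_v2(buf, size, CU_MEM_ADVISE_SET_ACCESSED_BY, loc);
 \endcode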
+ + \param devPtr - Pointer to memory to set the advice for + \param count - Size in bytes of the memory range + \param advice - Advice to be applied for the specified memory range + \param location - location to apply the advice for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, ::cuMemAdvise + ::cudaMemAdvise*/ + fn cuMemAdvise_v2( + devPtr: cuda_types::CUdeviceptr, + count: usize, + advice: cuda_types::CUmem_advise, + location: cuda_types::CUmemLocation, + ) -> cuda_types::CUresult; + /** \brief Query an attribute of a given memory range + + Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The + memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via + __managed__ variables. + + The \p attribute parameter can take the following values: + - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, \p data will be interpreted + as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given + memory range have read-duplication enabled, or 0 otherwise. + - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, \p data will be + interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device + id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU + if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID + if either all the pages don't have the same preferred location or some of the pages don't have a + preferred location at all. Note that the actual location of the pages in the memory range at the time of + the query may be different from the preferred location. + - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, \p data will be interpreted + as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned + will be a list of device ids that had ::CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range. + If any device does not have that advice set for the entire memory range, that device will not be included. + If \p data is larger than the number of devices that have that advice set for that memory range, + CU_DEVICE_INVALID will be returned in all the extra space provided. For ex., if \p dataSize is 12 + (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be + { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If \p data is smaller than the number of devices that have + that advice set, then only as many devices will be returned as can fit in the array. There is no + guarantee on which specific devices will be returned, however. + - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, \p data will be + interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location + to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. This will either be + a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU + respectively. 
If any page in the memory range was never explicitly prefetched or if all pages were not + prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the + last location that the application requested to prefetch the memory range to. It gives no indication as to + whether the prefetch operation to that location has completed or even begun. + - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE: If this attribute is specified, \p data will be + interpreted as a ::CUmemLocationType, and \p dataSize must be sizeof(CUmemLocationType). The ::CUmemLocationType returned will be + ::CU_MEM_LOCATION_TYPE_DEVICE if all pages in the memory range have the same GPU as their preferred location, or ::CUmemLocationType + will be ::CU_MEM_LOCATION_TYPE_HOST if all pages in the memory range have the CPU as their preferred location, or it will be ::CU_MEM_LOCATION_TYPE_HOST_NUMA + if all the pages in the memory range have the same host NUMA node ID as their preferred location or it will be ::CU_MEM_LOCATION_TYPE_INVALID + if either all the pages don't have the same preferred location or some of the pages don't have a preferred location at all. + Note that the actual location type of the pages in the memory range at the time of the query may be different from the preferred location type. + - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID: If this attribute is specified, \p data will be + interpreted as a 32-bit integer, and \p dataSize must be 4. If the ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE query for the same address range + returns ::CU_MEM_LOCATION_TYPE_DEVICE, it will be a valid device ordinal or if it returns ::CU_MEM_LOCATION_TYPE_HOST_NUMA, it will be a valid host NUMA node ID + or if it returns any other location type, the id should be ignored. + - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE: If this attribute is specified, \p data will be + interpreted as a ::CUmemLocationType, and \p dataSize must be sizeof(CUmemLocationType). The result returned will be the last location + to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. The ::CUmemLocationType returned + will be ::CU_MEM_LOCATION_TYPE_DEVICE if the last prefetch location was a GPU or ::CU_MEM_LOCATION_TYPE_HOST if it was the CPU or ::CU_MEM_LOCATION_TYPE_HOST_NUMA if + the last prefetch location was a specific host NUMA node. If any page in the memory range was never explicitly prefetched or if all pages were not + prefetched to the same location, ::CUmemLocationType will be ::CU_MEM_LOCATION_TYPE_INVALID. + Note that this simply returns the last location type that the application requested to prefetch the memory range to. It gives no indication as to + whether the prefetch operation to that location has completed or even begun. + - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID: If this attribute is specified, \p data will be + interpreted as a 32-bit integer, and \p dataSize must be 4. If the ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE query for the same address range + returns ::CU_MEM_LOCATION_TYPE_DEVICE, it will be a valid device ordinal or if it returns ::CU_MEM_LOCATION_TYPE_HOST_NUMA, it will be a valid host NUMA node ID + or if it returns any other location type, the id should be ignored. + + \param data - A pointer to a memory location where the result + of each attribute query will be written to.
+ \param dataSize - Array containing the size of data + \param attribute - The attribute to query + \param devPtr - Start of the range to query + \param count - Size of the range to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + \note_async + \note_null_stream + + \sa ::cuMemRangeGetAttributes, ::cuMemPrefetchAsync, + ::cuMemAdvise, + ::cudaMemRangeGetAttribute*/ + fn cuMemRangeGetAttribute( + data: *mut ::core::ffi::c_void, + dataSize: usize, + attribute: cuda_types::CUmem_range_attribute, + devPtr: cuda_types::CUdeviceptr, + count: usize, + ) -> cuda_types::CUresult; + /** \brief Query attributes of a given memory range. + + Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The + memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via + __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes + entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries. + The results of the query will be stored in \p data. + + The list of supported attributes are given below. Please refer to ::cuMemRangeGetAttribute for + attribute descriptions and restrictions. + + - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY + - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION + - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY + - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION + - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE + - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID + - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE + - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID + + \param data - A two-dimensional array containing pointers to memory + locations where the result of each attribute query will be written to. + \param dataSizes - Array containing the sizes of each result + \param attributes - An array of attributes to query + (numAttributes and the number of attributes in this array should match) + \param numAttributes - Number of attributes to query + \param devPtr - Start of the range to query + \param count - Size of the range to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa ::cuMemRangeGetAttribute, ::cuMemAdvise, + ::cuMemPrefetchAsync, + ::cudaMemRangeGetAttributes*/ + fn cuMemRangeGetAttributes( + data: *mut *mut ::core::ffi::c_void, + dataSizes: *mut usize, + attributes: *mut cuda_types::CUmem_range_attribute, + numAttributes: usize, + devPtr: cuda_types::CUdeviceptr, + count: usize, + ) -> cuda_types::CUresult; + /** \brief Set attributes on a previously allocated memory region + + The supported attributes are: + + - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: + + A boolean attribute that can either be set (1) or unset (0). When set, + the region of memory that \p ptr points to is guaranteed to always synchronize + memory operations that are synchronous. If there are some previously initiated + synchronous memory operations that are pending when this attribute is set, the + function does not return until those memory operations are complete. + See further documentation in the section titled "API synchronization behavior" + to learn more about cases when synchronous memory operations can + exhibit asynchronous behavior. + \p value will be considered as a pointer to an unsigned integer to which this attribute is to be set. 
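A brief, assumed usage sketch (not part of the original comment; dptr stands for a pointer returned by a CUDA memory allocation API):

 \code
 unsigned int enable = 1;
 cuPointerSetAttribute(&enable, CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, dptr);
 \endcode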
+ + \param value - Pointer to memory containing the value to be set + \param attribute - Pointer attribute to set + \param ptr - Pointer to a memory region allocated using CUDA memory allocation APIs + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa ::cuPointerGetAttribute, + ::cuPointerGetAttributes, + ::cuMemAlloc, + ::cuMemFree, + ::cuMemAllocHost, + ::cuMemFreeHost, + ::cuMemHostAlloc, + ::cuMemHostRegister, + ::cuMemHostUnregister*/ + fn cuPointerSetAttribute( + value: *const ::core::ffi::c_void, + attribute: cuda_types::CUpointer_attribute, + ptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Returns information about a pointer. + + The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions): + + - ::CU_POINTER_ATTRIBUTE_CONTEXT + - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE + - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER + - ::CU_POINTER_ATTRIBUTE_HOST_POINTER + - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS + - ::CU_POINTER_ATTRIBUTE_BUFFER_ID + - ::CU_POINTER_ATTRIBUTE_IS_MANAGED + - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL + - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR + - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE + - ::CU_POINTER_ATTRIBUTE_MAPPED + - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE + - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES + - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE + + \param numAttributes - Number of attributes to query + \param attributes - An array of attributes to query + (numAttributes and the number of attributes in this array should match) + \param data - A two-dimensional array containing pointers to memory + locations where the result of each attribute query will be written to. + \param ptr - Pointer to query + + Unlike ::cuPointerGetAttribute, this function will not return an error when the \p ptr + encountered is not a valid CUDA pointer. Instead, the attributes are assigned default NULL values + and CUDA_SUCCESS is returned. + + If \p ptr was not allocated by, mapped by, or registered with a ::CUcontext which uses UVA + (Unified Virtual Addressing), ::CUDA_ERROR_INVALID_CONTEXT is returned. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuPointerGetAttribute, + ::cuPointerSetAttribute, + ::cudaPointerGetAttributes*/ + fn cuPointerGetAttributes( + numAttributes: ::core::ffi::c_uint, + attributes: *mut cuda_types::CUpointer_attribute, + data: *mut *mut ::core::ffi::c_void, + ptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Create a stream + + Creates a stream and returns a handle in \p phStream. The \p Flags argument + determines behaviors of the stream. + + Valid values for \p Flags are: + - ::CU_STREAM_DEFAULT: Default stream creation flag. + - ::CU_STREAM_NON_BLOCKING: Specifies that work running in the created + stream may run concurrently with work in stream 0 (the NULL stream), and that + the created stream should perform no implicit synchronization with stream 0. 
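For illustration only (error checking omitted), creating a non-blocking stream and a second stream at the highest meaningful priority might look like:

 \code
 CUstream stream, prioStream;
 int leastPrio, greatestPrio;
 cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
 cuCtxGetStreamPriorityRange(&leastPrio, &greatestPrio);   // numerically lower = higher priority
 cuStreamCreateWithPriority(&prioStream, CU_STREAM_NON_BLOCKING, greatestPrio);
 \endcode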
+ + \param phStream - Returned newly created stream + \param Flags - Parameters for stream creation + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuStreamDestroy, + ::cuStreamCreateWithPriority, + ::cuStreamGetPriority, + ::cuStreamGetFlags, + ::cuStreamWaitEvent, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cudaStreamCreate, + ::cudaStreamCreateWithFlags*/ + fn cuStreamCreate( + phStream: *mut cuda_types::CUstream, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Create a stream with the given priority + + Creates a stream with the specified priority and returns a handle in \p phStream. + This affects the scheduling priority of work in the stream. Priorities provide a + hint to preferentially run work with higher priority when possible, but do + not preempt already-running work or provide any other functional guarantee on + execution order. + + \p priority follows a convention where lower numbers represent higher priorities. + '0' represents default priority. The range of meaningful numerical priorities can + be queried using ::cuCtxGetStreamPriorityRange. If the specified priority is + outside the numerical range returned by ::cuCtxGetStreamPriorityRange, + it will automatically be clamped to the lowest or the highest number in the range. + + \param phStream - Returned newly created stream + \param flags - Flags for stream creation. See ::cuStreamCreate for a list of + valid flags + \param priority - Stream priority. Lower numbers represent higher priorities. + See ::cuCtxGetStreamPriorityRange for more information about + meaningful stream priorities that can be passed. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \note Stream priorities are supported only on GPUs + with compute capability 3.5 or higher. + + \note In the current implementation, only compute kernels launched in + priority streams are affected by the stream's priority. Stream priorities have + no effect on host-to-device and device-to-host memory operations. + + \sa ::cuStreamDestroy, + ::cuStreamCreate, + ::cuStreamGetPriority, + ::cuCtxGetStreamPriorityRange, + ::cuStreamGetFlags, + ::cuStreamWaitEvent, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cudaStreamCreateWithPriority*/ + fn cuStreamCreateWithPriority( + phStream: *mut cuda_types::CUstream, + flags: ::core::ffi::c_uint, + priority: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Query the priority of a given stream + + Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority + and return the priority in \p priority. Note that if the stream was created with a + priority outside the numerical range returned by ::cuCtxGetStreamPriorityRange, + this function returns the clamped priority. + See ::cuStreamCreateWithPriority for details about priority clamping. 
+ + \param hStream - Handle to the stream to be queried + \param priority - Pointer to a signed integer in which the stream's priority is returned + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuStreamDestroy, + ::cuStreamCreate, + ::cuStreamCreateWithPriority, + ::cuCtxGetStreamPriorityRange, + ::cuStreamGetFlags, + ::cudaStreamGetPriority*/ + fn cuStreamGetPriority_ptsz( + hStream: cuda_types::CUstream, + priority: *mut ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Query the flags of a given stream + + Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority + and return the flags in \p flags. + + \param hStream - Handle to the stream to be queried + \param flags - Pointer to an unsigned integer in which the stream's flags are returned + The value returned in \p flags is a logical 'OR' of all flags that + were used while creating this stream. See ::cuStreamCreate for the list + of valid flags + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuStreamDestroy, + ::cuStreamCreate, + ::cuStreamGetPriority, + ::cudaStreamGetFlags*/ + fn cuStreamGetFlags_ptsz( + hStream: cuda_types::CUstream, + flags: *mut ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Returns the unique Id associated with the stream handle supplied + + Returns in \p streamId the unique Id which is associated with the given stream handle. + The Id is unique for the life of the program. + + The stream handle \p hStream can refer to any of the following: +
+ - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate + and ::cuStreamCreateWithPriority, or their runtime API equivalents such as + ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority. + Passing an invalid handle will result in undefined behavior.
+ - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and + ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted, + which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively.
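A minimal, assumed usage sketch (hStream is any handle of the kinds listed above):

 \code
 unsigned long long id;
 cuStreamGetId(hStream, &id);   // id is unique for the life of the program
 \endcode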
+ + \param hStream - Handle to the stream to be queried + \param streamId - Pointer to store the Id of the stream + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuStreamDestroy, + ::cuStreamCreate, + ::cuStreamGetPriority, + ::cudaStreamGetId*/ + fn cuStreamGetId_ptsz( + hStream: cuda_types::CUstream, + streamId: *mut ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Query the context associated with a stream + + Returns the CUDA context that the stream is associated with. + + The stream handle \p hStream can refer to any of the following: +
+ - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate + and ::cuStreamCreateWithPriority, or their runtime API equivalents such as + ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority. + The returned context is the context that was active in the calling thread when the + stream was created. Passing an invalid handle will result in undefined behavior.
+ - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and + ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted, + which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively. + Specifying any of the special handles will return the context current to the + calling thread. If no context is current to the calling thread, + ::CUDA_ERROR_INVALID_CONTEXT is returned.
+ + \param hStream - Handle to the stream to be queried + \param pctx - Returned context associated with the stream + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + \notefnerr + + \sa ::cuStreamDestroy, + ::cuStreamCreateWithPriority, + ::cuStreamGetPriority, + ::cuStreamGetFlags, + ::cuStreamWaitEvent, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cudaStreamCreate, + ::cudaStreamCreateWithFlags*/ + fn cuStreamGetCtx_ptsz( + hStream: cuda_types::CUstream, + pctx: *mut cuda_types::CUcontext, + ) -> cuda_types::CUresult; + /** \brief Make a compute stream wait on an event + + Makes all future work submitted to \p hStream wait for all work captured in + \p hEvent. See ::cuEventRecord() for details on what is captured by an event. + The synchronization will be performed efficiently on the device when applicable. + \p hEvent may be from a different context or device than \p hStream. + + flags include: + - ::CU_EVENT_WAIT_DEFAULT: Default event creation flag. + - ::CU_EVENT_WAIT_EXTERNAL: Event is captured in the graph as an external + event node when performing stream capture. This flag is invalid outside + of stream capture. + + \param hStream - Stream to wait + \param hEvent - Event to wait on (may not be NULL) + \param Flags - See ::CUevent_capture_flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + \note_null_stream + \notefnerr + + \sa ::cuStreamCreate, + ::cuEventRecord, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cuStreamDestroy, + ::cudaStreamWaitEvent*/ + fn cuStreamWaitEvent_ptsz( + hStream: cuda_types::CUstream, + hEvent: cuda_types::CUevent, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Add a callback to a compute stream + + \note This function is slated for eventual deprecation and removal. If + you do not require the callback to execute in case of a device error, + consider using ::cuLaunchHostFunc. Additionally, this function is not + supported with ::cuStreamBeginCapture and ::cuStreamEndCapture, unlike + ::cuLaunchHostFunc. + + Adds a callback to be called on the host after all currently enqueued + items in the stream have completed. For each + cuStreamAddCallback call, the callback will be executed exactly once. + The callback will block later work in the stream until it is finished. + + The callback may be passed ::CUDA_SUCCESS or an error code. In the event + of a device error, all subsequently executed callbacks will receive an + appropriate ::CUresult. + + Callbacks must not make any CUDA API calls. Attempting to use a CUDA API + will result in ::CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any + synchronization that may depend on outstanding device work or other callbacks + that are not mandated to run earlier. Callbacks without a mandated order + (in independent streams) execute in undefined order and may be serialized. + + For the purposes of Unified Memory, callback execution makes a number of + guarantees: +
+ - The callback stream is considered idle for the duration of the + callback. Thus, for example, a callback may always use memory attached + to the callback stream.
+ - The start of execution of a callback has the same effect as + synchronizing an event recorded in the same stream immediately prior to + the callback. It thus synchronizes streams which have been "joined" + prior to the callback.
+ - Adding device work to any stream does not have the effect of making + the stream active until all preceding host functions and stream callbacks + have executed. Thus, for + example, a callback might use global attached memory even if work has + been added to another stream, if the work has been ordered behind the + callback with an event.
+ - Completion of a callback does not cause a stream to become + active except as described above. The callback stream will remain idle + if no device work follows the callback, and will remain idle across + consecutive callbacks without device work in between. Thus, for example, + stream synchronization can be done by signaling from a callback at the + end of the stream.
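A hedged sketch of the mechanics described above (illustrative only; note that the callback must not call back into the CUDA API):

 \code
 static void CUDA_CB myCallback(CUstream hStream, CUresult status, void *userData) {
     // Runs on a driver thread after all previously enqueued work in hStream completes;
     // status carries CUDA_SUCCESS or the error that poisoned the stream.
     (void)hStream; (void)status; (void)userData;
 }

 cuStreamAddCallback(hStream, myCallback, NULL, 0 /* flags must be 0 */);
 \endcode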
+ + \param hStream - Stream to add callback to + \param callback - The function to call once preceding stream operations are complete + \param userData - User specified data to be passed to the callback function + \param flags - Reserved for future use, must be 0 + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_null_stream + \notefnerr + + \sa ::cuStreamCreate, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamWaitEvent, + ::cuStreamDestroy, + ::cuMemAllocManaged, + ::cuStreamAttachMemAsync, + ::cuLaunchHostFunc, + ::cudaStreamAddCallback*/ + fn cuStreamAddCallback_ptsz( + hStream: cuda_types::CUstream, + callback: cuda_types::CUstreamCallback, + userData: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Begins graph capture on a stream + + Begin graph capture on \p hStream. When a stream is in capture mode, all operations + pushed into the stream will not be executed, but will instead be captured into + a graph, which will be returned via ::cuStreamEndCapture. Capture may not be initiated + if \p stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which + it was initiated, and it may only be initiated if the stream is not already in capture + mode. The capture mode may be queried via ::cuStreamIsCapturing. A unique id + representing the capture sequence may be queried via ::cuStreamGetCaptureInfo. + + If \p mode is not ::CU_STREAM_CAPTURE_MODE_RELAXED, ::cuStreamEndCapture must be + called on this stream from the same thread. + + \param hStream - Stream in which to initiate capture + \param mode - Controls the interaction of this capture sequence with other API + calls that are potentially unsafe. For more details see + ::cuThreadExchangeStreamCaptureMode. + + \note Kernels captured using this API must not use texture and surface references. + Reading or writing through any texture or surface reference is undefined + behavior. This restriction does not apply to texture and surface objects. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuStreamCreate, + ::cuStreamIsCapturing, + ::cuStreamEndCapture, + ::cuThreadExchangeStreamCaptureMode*/ + fn cuStreamBeginCapture_v2_ptsz( + hStream: cuda_types::CUstream, + mode: cuda_types::CUstreamCaptureMode, + ) -> cuda_types::CUresult; + /** \brief Begins graph capture on a stream to an existing graph + + Begin graph capture on \p hStream, placing new nodes into an existing graph. When a stream is + in capture mode, all operations pushed into the stream will not be executed, but will instead + be captured into \p hGraph. The graph will not be instantiable until the user calls + ::cuStreamEndCapture. + + Capture may not be initiated if \p stream is CU_STREAM_LEGACY. Capture must be ended on the + same stream in which it was initiated, and it may only be initiated if the stream is not + already in capture mode. The capture mode may be queried via ::cuStreamIsCapturing. A unique id + representing the capture sequence may be queried via ::cuStreamGetCaptureInfo. + + If \p mode is not ::CU_STREAM_CAPTURE_MODE_RELAXED, ::cuStreamEndCapture must be + called on this stream from the same thread. + + \param hStream - Stream in which to initiate capture. + \param hGraph - Graph to capture into. 
+ \param dependencies - Dependencies of the first node captured in the stream. Can be NULL if numDependencies is 0. + \param dependencyData - Optional array of data associated with each dependency. + \param numDependencies - Number of dependencies. + \param mode - Controls the interaction of this capture sequence with other API + calls that are potentially unsafe. For more details see + ::cuThreadExchangeStreamCaptureMode. + + \note Kernels captured using this API must not use texture and surface references. + Reading or writing through any texture or surface reference is undefined + behavior. This restriction does not apply to texture and surface objects. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuStreamBeginCapture, + ::cuStreamCreate, + ::cuStreamIsCapturing, + ::cuStreamEndCapture, + ::cuThreadExchangeStreamCaptureMode, + ::cuGraphAddNode,*/ + fn cuStreamBeginCaptureToGraph_ptsz( + hStream: cuda_types::CUstream, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + mode: cuda_types::CUstreamCaptureMode, + ) -> cuda_types::CUresult; + /** \brief Swaps the stream capture interaction mode for a thread + + Sets the calling thread's stream capture interaction mode to the value contained + in \p *mode, and overwrites \p *mode with the previous mode for the thread. To + facilitate deterministic behavior across function or module boundaries, callers + are encouraged to use this API in a push-pop fashion: \code +CUstreamCaptureMode mode = desiredMode; +cuThreadExchangeStreamCaptureMode(&mode); +... +cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode + \endcode + + During stream capture (see ::cuStreamBeginCapture), some actions, such as a call + to ::cudaMalloc, may be unsafe. In the case of ::cudaMalloc, the operation is + not enqueued asynchronously to a stream, and is not observed by stream capture. + Therefore, if the sequence of operations captured via ::cuStreamBeginCapture + depended on the allocation being replayed whenever the graph is launched, the + captured graph would be invalid. + + Therefore, stream capture places restrictions on API calls that can be made within + or concurrently to a ::cuStreamBeginCapture-::cuStreamEndCapture sequence. This + behavior can be controlled via this API and flags to ::cuStreamBeginCapture. + + A thread's mode is one of the following: + - \p CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has + an ongoing capture sequence that was not initiated with + \p CU_STREAM_CAPTURE_MODE_RELAXED at \p cuStreamBeginCapture, or if any other thread + has a concurrent capture sequence initiated with \p CU_STREAM_CAPTURE_MODE_GLOBAL, + this thread is prohibited from potentially unsafe API calls. + - \p CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture + sequence not initiated with \p CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited + from potentially unsafe API calls. Concurrent capture sequences in other threads + are ignored. + - \p CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially + unsafe API calls. Note that the thread is still prohibited from API calls which + necessarily conflict with stream capture, for example, attempting ::cuEventQuery + on an event that was last recorded inside a capture sequence. 
+ + \param mode - Pointer to mode value to swap with the current mode + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuStreamBeginCapture*/ + fn cuThreadExchangeStreamCaptureMode( + mode: *mut cuda_types::CUstreamCaptureMode, + ) -> cuda_types::CUresult; + /** \brief Ends capture on a stream, returning the captured graph + + End capture on \p hStream, returning the captured graph via \p phGraph. + Capture must have been initiated on \p hStream via a call to ::cuStreamBeginCapture. + If capture was invalidated, due to a violation of the rules of stream capture, then + a NULL graph will be returned. + + If the \p mode argument to ::cuStreamBeginCapture was not + ::CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as + ::cuStreamBeginCapture. + + \param hStream - Stream to query + \param phGraph - The captured graph + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD + \notefnerr + + \sa + ::cuStreamCreate, + ::cuStreamBeginCapture, + ::cuStreamIsCapturing, + ::cuGraphDestroy*/ + fn cuStreamEndCapture_ptsz( + hStream: cuda_types::CUstream, + phGraph: *mut cuda_types::CUgraph, + ) -> cuda_types::CUresult; + /** \brief Returns a stream's capture status + + Return the capture status of \p hStream via \p captureStatus. After a successful + call, \p *captureStatus will contain one of the following: + - ::CU_STREAM_CAPTURE_STATUS_NONE: The stream is not capturing. + - ::CU_STREAM_CAPTURE_STATUS_ACTIVE: The stream is capturing. + - ::CU_STREAM_CAPTURE_STATUS_INVALIDATED: The stream was capturing but an error + has invalidated the capture sequence. The capture sequence must be terminated + with ::cuStreamEndCapture on the stream where it was initiated in order to + continue using \p hStream. + + Note that, if this is called on ::CU_STREAM_LEGACY (the "null stream") while + a blocking stream in the same context is capturing, it will return + ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and \p *captureStatus is unspecified + after the call. The blocking stream capture is not invalidated. + + When a blocking stream is capturing, the legacy stream is in an + unusable state until the blocking stream capture is terminated. The legacy + stream is not supported for stream capture, but attempted use would have an + implicit dependency on the capturing stream(s). + + \param hStream - Stream to query + \param captureStatus - Returns the stream's capture status + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + \notefnerr + + \sa + ::cuStreamCreate, + ::cuStreamBeginCapture, + ::cuStreamEndCapture*/ + fn cuStreamIsCapturing_ptsz( + hStream: cuda_types::CUstream, + captureStatus: *mut cuda_types::CUstreamCaptureStatus, + ) -> cuda_types::CUresult; + /** \brief Query a stream's capture state + + Query stream state related to stream capture. + + If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created + with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT. 
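To ground the capture workflow these entry points describe, here is a minimal assumed sketch (error handling omitted; hStream is a non-default stream) that records work into a graph and checks the capture state mid-capture:

 \code
 CUgraph graph;
 CUstreamCaptureStatus status;
 cuStreamBeginCapture(hStream, CU_STREAM_CAPTURE_MODE_GLOBAL);
 // ... enqueue kernels / copies into hStream; they are recorded into the graph, not executed ...
 cuStreamIsCapturing(hStream, &status);   // status == CU_STREAM_CAPTURE_STATUS_ACTIVE here
 cuStreamEndCapture(hStream, &graph);     // graph now holds the captured work
 \endcode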
+ + Valid data (other than capture status) is returned only if both of the following are true: + - the call returns CUDA_SUCCESS + - the returned capture status is ::CU_STREAM_CAPTURE_STATUS_ACTIVE + + \param hStream - The stream to query + \param captureStatus_out - Location to return the capture status of the stream; required + \param id_out - Optional location to return an id for the capture sequence, which is + unique over the lifetime of the process + \param graph_out - Optional location to return the graph being captured into. All + operations other than destroy and node removal are permitted on the graph + while the capture sequence is in progress. This API does not transfer + ownership of the graph, which is transferred or destroyed at + ::cuStreamEndCapture. Note that the graph handle may be invalidated before + end of capture for certain errors. Nodes that are or become + unreachable from the original stream at ::cuStreamEndCapture due to direct + actions on the graph do not trigger ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED. + \param dependencies_out - Optional location to store a pointer to an array of nodes. + The next node to be captured in the stream will depend on this set of nodes, + absent operations such as event wait which modify this set. The array pointer + is valid until the next API call which operates on the stream or until the + capture is terminated. The node handles may be copied out and are valid until + they or the graph is destroyed. The driver-owned array may also be passed + directly to APIs that operate on the graph (not the stream) without copying. + \param numDependencies_out - Optional location to store the size of the array + returned in dependencies_out. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + \note_graph_thread_safety + \notefnerr + + \sa + ::cuStreamGetCaptureInfo_v3 + ::cuStreamBeginCapture, + ::cuStreamIsCapturing, + ::cuStreamUpdateCaptureDependencies*/ + fn cuStreamGetCaptureInfo_v2_ptsz( + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + numDependencies_out: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Query a stream's capture state (12.3+) + + Query stream state related to stream capture. + + If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created + with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT. + + Valid data (other than capture status) is returned only if both of the following are true: + - the call returns CUDA_SUCCESS + - the returned capture status is ::CU_STREAM_CAPTURE_STATUS_ACTIVE + + If \p edgeData_out is non-NULL then \p dependencies_out must be as well. If + \p dependencies_out is non-NULL and \p edgeData_out is NULL, but there is non-zero edge + data for one or more of the current stream dependencies, the call will return + ::CUDA_ERROR_LOSSY_QUERY. + + \param hStream - The stream to query + \param captureStatus_out - Location to return the capture status of the stream; required + \param id_out - Optional location to return an id for the capture sequence, which is + unique over the lifetime of the process + \param graph_out - Optional location to return the graph being captured into. All + operations other than destroy and node removal are permitted on the graph + while the capture sequence is in progress. 
This API does not transfer + ownership of the graph, which is transferred or destroyed at + ::cuStreamEndCapture. Note that the graph handle may be invalidated before + end of capture for certain errors. Nodes that are or become + unreachable from the original stream at ::cuStreamEndCapture due to direct + actions on the graph do not trigger ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED. + \param dependencies_out - Optional location to store a pointer to an array of nodes. + The next node to be captured in the stream will depend on this set of nodes, + absent operations such as event wait which modify this set. The array pointer + is valid until the next API call which operates on the stream or until the + capture is terminated. The node handles may be copied out and are valid until + they or the graph is destroyed. The driver-owned array may also be passed + directly to APIs that operate on the graph (not the stream) without copying. + \param edgeData_out - Optional location to store a pointer to an array of graph edge + data. This array parallels \c dependencies_out; the next node to be added + has an edge to \c dependencies_out[i] with annotation \c edgeData_out[i] for + each \c i. The array pointer is valid until the next API call which operates + on the stream or until the capture is terminated. + \param numDependencies_out - Optional location to store the size of the array + returned in dependencies_out. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT, + ::CUDA_ERROR_LOSSY_QUERY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuStreamGetCaptureInfo + ::cuStreamBeginCapture, + ::cuStreamIsCapturing, + ::cuStreamUpdateCaptureDependencies*/ + fn cuStreamGetCaptureInfo_v3_ptsz( + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + edgeData_out: *mut *const cuda_types::CUgraphEdgeData, + numDependencies_out: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Update the set of dependencies in a capturing stream (11.3+) + + Modifies the dependency set of a capturing stream. The dependency set is the set + of nodes that the next captured node in the stream will depend on. + + Valid flags are ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES and + ::CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to + the API is added to the existing set or replaces it. A flags value of 0 defaults + to ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES. + + Nodes that are removed from the dependency set via this API do not result in + ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at + ::cuStreamEndCapture. + + Returns ::CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing. + + This API is new in CUDA 11.3. Developers requiring compatibility across minor + versions to CUDA 11.0 should not use this API or provide a fallback. 
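+
+ A minimal sketch of the intended pattern (illustrative only; error handling
+ omitted, and the public unsuffixed entry points are used): query the capture
+ state, add a node directly to the captured graph, then make that node the
+ dependency of whatever the stream captures next:
+ \code
+CUstreamCaptureStatus status;
+CUgraph graph;
+const CUgraphNode *deps;
+size_t numDeps;
+cuStreamGetCaptureInfo(hStream, &status, NULL, &graph, &deps, &numDeps);
+CUgraphNode node;
+cuGraphAddEmptyNode(&node, graph, deps, numDeps); // added directly to the graph
+cuStreamUpdateCaptureDependencies(hStream, &node, 1, CU_STREAM_SET_CAPTURE_DEPENDENCIES);
+ \endcode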
+ + \param hStream - The stream to update + \param dependencies - The set of dependencies to add + \param numDependencies - The size of the dependencies array + \param flags - See above + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_ILLEGAL_STATE + + \sa + ::cuStreamBeginCapture, + ::cuStreamGetCaptureInfo,*/ + fn cuStreamUpdateCaptureDependencies_ptsz( + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + numDependencies: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Update the set of dependencies in a capturing stream (12.3+) + + Modifies the dependency set of a capturing stream. The dependency set is the set + of nodes that the next captured node in the stream will depend on along with the + edge data for those dependencies. + + Valid flags are ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES and + ::CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to + the API is added to the existing set or replaces it. A flags value of 0 defaults + to ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES. + + Nodes that are removed from the dependency set via this API do not result in + ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at + ::cuStreamEndCapture. + + Returns ::CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing. + + \param hStream - The stream to update + \param dependencies - The set of dependencies to add + \param dependencyData - Optional array of data associated with each dependency. + \param numDependencies - The size of the dependencies array + \param flags - See above + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_ILLEGAL_STATE + + \sa + ::cuStreamBeginCapture, + ::cuStreamGetCaptureInfo,*/ + fn cuStreamUpdateCaptureDependencies_v2_ptsz( + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Attach memory to a stream asynchronously + + Enqueues an operation in \p hStream to specify stream association of + \p length bytes of memory starting from \p dptr. This function is a + stream-ordered operation, meaning that it is dependent on, and will + only take effect when, previous work in stream has completed. Any + previous association is automatically replaced. + + \p dptr must point to one of the following types of memories: + - managed memory declared using the __managed__ keyword or allocated with + ::cuMemAllocManaged. + - a valid host-accessible region of system-allocated pageable memory. This + type of memory may only be specified if the device associated with the + stream reports a non-zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + + For managed allocations, \p length must be either zero or the entire + allocation's size. Both indicate that the entire allocation's stream + association is being changed. Currently, it is not possible to change stream + association for a portion of a managed allocation. + + For pageable host allocations, \p length must be non-zero. + + The stream association is specified using \p flags which must be + one of ::CUmemAttach_flags. + If the ::CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed + by any stream on any device. 
+ If the ::CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee + that it won't access the memory on the device from any stream on a device that + has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + If the ::CU_MEM_ATTACH_SINGLE flag is specified and \p hStream is associated with + a device that has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, + the program makes a guarantee that it will only access the memory on the device + from \p hStream. It is illegal to attach singly to the NULL stream, because the + NULL stream is a virtual global stream and not a specific stream. An error will + be returned in this case. + + When memory is associated with a single stream, the Unified Memory system will + allow CPU access to this memory region so long as all operations in \p hStream + have completed, regardless of whether other streams are active. In effect, + this constrains exclusive ownership of the managed memory region by + an active GPU to per-stream activity instead of whole-GPU activity. + + Accessing memory on the device from streams that are not associated with + it will produce undefined results. No error checking is performed by the + Unified Memory system to ensure that kernels launched into other streams + do not access this region. + + It is a program's responsibility to order calls to ::cuStreamAttachMemAsync + via events, synchronization or other means to ensure legal access to memory + at all times. Data visibility and coherency will be changed appropriately + for all kernels which follow a stream-association change. + + If \p hStream is destroyed while data is associated with it, the association is + removed and the association reverts to the default visibility of the allocation + as specified at ::cuMemAllocManaged. For __managed__ variables, the default + association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an + asynchronous operation, and as a result, the change to default association won't + happen until all work in the stream has completed. + + \param hStream - Stream in which to enqueue the attach operation + \param dptr - Pointer to memory (must be a pointer to managed memory or + to a valid host-accessible region of system-allocated + pageable memory) + \param length - Length of memory + \param flags - Must be one of ::CUmemAttach_flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_null_stream + \notefnerr + + \sa ::cuStreamCreate, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamWaitEvent, + ::cuStreamDestroy, + ::cuMemAllocManaged, + ::cudaStreamAttachMemAsync*/ + fn cuStreamAttachMemAsync_ptsz( + hStream: cuda_types::CUstream, + dptr: cuda_types::CUdeviceptr, + length: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Determine status of a compute stream + + Returns ::CUDA_SUCCESS if all operations in the stream specified by + \p hStream have completed, or ::CUDA_ERROR_NOT_READY if not. + + For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS + is equivalent to having called ::cuStreamSynchronize(). 
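+
+ A minimal polling sketch (illustrative only), assuming \p hStream is an
+ existing stream:
+ \code
+CUresult state = cuStreamQuery(hStream);
+if (state == CUDA_SUCCESS) {
+// all previously submitted work in hStream has completed
+} else if (state == CUDA_ERROR_NOT_READY) {
+// work is still in flight; do other work and poll again later
+}
+ \endcode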
+ + \param hStream - Stream to query status of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_READY + \note_null_stream + \notefnerr + + \sa ::cuStreamCreate, + ::cuStreamWaitEvent, + ::cuStreamDestroy, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cudaStreamQuery*/ + fn cuStreamQuery_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + /** \brief Wait until a stream's tasks are completed + + Waits until the device has completed all operations in the stream specified + by \p hStream. If the context was created with the + ::CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the + stream is finished with all of its tasks. + + \param hStream - Stream to wait for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE + + \note_null_stream + \notefnerr + + \sa ::cuStreamCreate, + ::cuStreamDestroy, + ::cuStreamWaitEvent, + ::cuStreamQuery, + ::cuStreamAddCallback, + ::cudaStreamSynchronize*/ + fn cuStreamSynchronize_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + /** \brief Destroys a stream + + Destroys the stream specified by \p hStream. + + In case the device is still doing work in the stream \p hStream + when ::cuStreamDestroy() is called, the function will return immediately + and the resources associated with \p hStream will be released automatically + once the device has completed all work in \p hStream. + + \param hStream - Stream to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuStreamCreate, + ::cuStreamWaitEvent, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cudaStreamDestroy*/ + fn cuStreamDestroy_v2(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + /** \brief Copies attributes from source stream to destination stream. + + Copies attributes from source stream \p src to destination stream \p dst. + Both streams must have the same context. + + \param[out] dst Destination stream + \param[in] src Source stream + For list of attributes see ::CUstreamAttrID + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuStreamCopyAttributes_ptsz( + dst: cuda_types::CUstream, + src: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Queries stream attribute. + + Queries attribute \p attr from \p hStream and stores it in corresponding + member of \p value_out. + + \param[in] hStream + \param[in] attr + \param[out] value_out + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuStreamGetAttribute_ptsz( + hStream: cuda_types::CUstream, + attr: cuda_types::CUstreamAttrID, + value_out: *mut cuda_types::CUstreamAttrValue, + ) -> cuda_types::CUresult; + /** \brief Sets stream attribute. + + Sets attribute \p attr on \p hStream from corresponding attribute of + \p value. The updated attribute will be applied to subsequent work + submitted to the stream. It will not affect previously submitted work. 
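+
+ As an illustrative sketch only (the names devPtr and numBytes are assumptions
+ standing in for an existing allocation; error handling is omitted), an access
+ policy window can be set on a stream as follows:
+ \code
+CUstreamAttrValue value;
+value.accessPolicyWindow.base_ptr = (void *)devPtr;
+value.accessPolicyWindow.num_bytes = numBytes;
+value.accessPolicyWindow.hitRatio = 0.6f;
+value.accessPolicyWindow.hitProp = CU_ACCESS_PROPERTY_PERSISTING;
+value.accessPolicyWindow.missProp = CU_ACCESS_PROPERTY_STREAMING;
+cuStreamSetAttribute(hStream, CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW, &value);
+ \endcode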
+ + \param[out] hStream + \param[in] attr + \param[in] value + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuStreamSetAttribute_ptsz( + hStream: cuda_types::CUstream, + attr: cuda_types::CUstreamAttrID, + value: *const cuda_types::CUstreamAttrValue, + ) -> cuda_types::CUresult; + /** \brief Creates an event + + Creates an event *phEvent for the current context with the flags specified via + \p Flags. Valid flags include: + - ::CU_EVENT_DEFAULT: Default event creation flag. + - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking + synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on + an event created with this flag will block until the event has actually + been recorded. + - ::CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need + to record timing data. Events created with this flag specified and + the ::CU_EVENT_BLOCKING_SYNC flag not specified will provide the best + performance when used with ::cuStreamWaitEvent() and ::cuEventQuery(). + - ::CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an + interprocess event by ::cuIpcGetEventHandle(). ::CU_EVENT_INTERPROCESS must + be specified along with ::CU_EVENT_DISABLE_TIMING. + + \param phEvent - Returns newly created event + \param Flags - Event creation flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa + ::cuEventRecord, + ::cuEventQuery, + ::cuEventSynchronize, + ::cuEventDestroy, + ::cuEventElapsedTime, + ::cudaEventCreate, + ::cudaEventCreateWithFlags*/ + fn cuEventCreate( + phEvent: *mut cuda_types::CUevent, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Records an event + + Captures in \p hEvent the contents of \p hStream at the time of this call. + \p hEvent and \p hStream must be from the same context. + Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then + examine or wait for completion of the work that was captured. Uses of + \p hStream after this call do not modify \p hEvent. See note on default + stream behavior for what is captured in the default case. + + ::cuEventRecord() can be called multiple times on the same event and + will overwrite the previously captured state. Other APIs such as + ::cuStreamWaitEvent() use the most recently captured state at the time + of the API call, and are not affected by later calls to + ::cuEventRecord(). Before the first call to ::cuEventRecord(), an + event represents an empty set of work, so for example ::cuEventQuery() + would return ::CUDA_SUCCESS. + + \param hEvent - Event to record + \param hStream - Stream to record event for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + \note_null_stream + \notefnerr + + \sa ::cuEventCreate, + ::cuEventQuery, + ::cuEventSynchronize, + ::cuStreamWaitEvent, + ::cuEventDestroy, + ::cuEventElapsedTime, + ::cudaEventRecord, + ::cuEventRecordWithFlags*/ + fn cuEventRecord_ptsz( + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Records an event + + Captures in \p hEvent the contents of \p hStream at the time of this call. + \p hEvent and \p hStream must be from the same context. 
+ Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then + examine or wait for completion of the work that was captured. Uses of + \p hStream after this call do not modify \p hEvent. See note on default + stream behavior for what is captured in the default case. + + ::cuEventRecordWithFlags() can be called multiple times on the same event and + will overwrite the previously captured state. Other APIs such as + ::cuStreamWaitEvent() use the most recently captured state at the time + of the API call, and are not affected by later calls to + ::cuEventRecordWithFlags(). Before the first call to ::cuEventRecordWithFlags(), an + event represents an empty set of work, so for example ::cuEventQuery() + would return ::CUDA_SUCCESS. + + flags include: + - ::CU_EVENT_RECORD_DEFAULT: Default event creation flag. + - ::CU_EVENT_RECORD_EXTERNAL: Event is captured in the graph as an external + event node when performing stream capture. This flag is invalid outside + of stream capture. + + \param hEvent - Event to record + \param hStream - Stream to record event for + \param flags - See ::CUevent_capture_flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + \note_null_stream + \notefnerr + + \sa ::cuEventCreate, + ::cuEventQuery, + ::cuEventSynchronize, + ::cuStreamWaitEvent, + ::cuEventDestroy, + ::cuEventElapsedTime, + ::cuEventRecord, + ::cudaEventRecord*/ + fn cuEventRecordWithFlags_ptsz( + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Queries an event's status + + Queries the status of all work currently captured by \p hEvent. See + ::cuEventRecord() for details on what is captured by an event. + + Returns ::CUDA_SUCCESS if all captured work has been completed, or + ::CUDA_ERROR_NOT_READY if any captured work is incomplete. + + For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS + is equivalent to having called ::cuEventSynchronize(). + + \param hEvent - Event to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_READY + \notefnerr + + \sa ::cuEventCreate, + ::cuEventRecord, + ::cuEventSynchronize, + ::cuEventDestroy, + ::cuEventElapsedTime, + ::cudaEventQuery*/ + fn cuEventQuery(hEvent: cuda_types::CUevent) -> cuda_types::CUresult; + /** \brief Waits for an event to complete + + Waits until the completion of all work currently captured in \p hEvent. + See ::cuEventRecord() for details on what is captured by an event. + + Waiting for an event that was created with the ::CU_EVENT_BLOCKING_SYNC + flag will cause the calling CPU thread to block until the event has + been completed by the device. If the ::CU_EVENT_BLOCKING_SYNC flag has + not been set, then the CPU thread will busy-wait until the event has + been completed by the device. + + \param hEvent - Event to wait for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuEventCreate, + ::cuEventRecord, + ::cuEventQuery, + ::cuEventDestroy, + ::cuEventElapsedTime, + ::cudaEventSynchronize*/ + fn cuEventSynchronize(hEvent: cuda_types::CUevent) -> cuda_types::CUresult; + /** \brief Destroys an event + + Destroys the event specified by \p hEvent. 
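+
+ A typical lifecycle that ends in destruction might look as follows
+ (illustrative sketch only; hStream is assumed to exist and error checking
+ is omitted):
+ \code
+CUevent start, stop;
+float ms = 0.0f;
+cuEventCreate(&start, CU_EVENT_DEFAULT);
+cuEventCreate(&stop, CU_EVENT_DEFAULT);
+cuEventRecord(start, hStream);
+// ... work submitted to hStream ...
+cuEventRecord(stop, hStream);
+cuEventSynchronize(stop);
+cuEventElapsedTime(&ms, start, stop);
+cuEventDestroy(start);
+cuEventDestroy(stop);
+ \endcode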
+ + An event may be destroyed before it is complete (i.e., while + ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY). In this case, the + call does not block on completion of the event, and any associated + resources will automatically be released asynchronously at completion. + + \param hEvent - Event to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuEventCreate, + ::cuEventRecord, + ::cuEventQuery, + ::cuEventSynchronize, + ::cuEventElapsedTime, + ::cudaEventDestroy*/ + fn cuEventDestroy_v2(hEvent: cuda_types::CUevent) -> cuda_types::CUresult; + /** \brief Computes the elapsed time between two events + + Computes the elapsed time between two events (in milliseconds with a + resolution of around 0.5 microseconds). + + If either event was last recorded in a non-NULL stream, the resulting time + may be greater than expected (even if both used the same stream handle). This + happens because the ::cuEventRecord() operation takes place asynchronously + and there is no guarantee that the measured latency is actually just between + the two events. Any number of other different stream operations could execute + in between the two measured events, thus altering the timing in a significant + way. + + If ::cuEventRecord() has not been called on either event then + ::CUDA_ERROR_INVALID_HANDLE is returned. If ::cuEventRecord() has been called + on both events but one or both of them has not yet been completed (that is, + ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY on at least one of the + events), ::CUDA_ERROR_NOT_READY is returned. If either event was created with + the ::CU_EVENT_DISABLE_TIMING flag, then this function will return + ::CUDA_ERROR_INVALID_HANDLE. + + \param pMilliseconds - Time between \p hStart and \p hEnd in ms + \param hStart - Starting event + \param hEnd - Ending event + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_READY, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuEventCreate, + ::cuEventRecord, + ::cuEventQuery, + ::cuEventSynchronize, + ::cuEventDestroy, + ::cudaEventElapsedTime*/ + fn cuEventElapsedTime( + pMilliseconds: *mut f32, + hStart: cuda_types::CUevent, + hEnd: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Imports an external memory object + + Imports an externally allocated memory object and returns + a handle to that in \p extMem_out. + + The properties of the handle being imported must be described in + \p memHandleDesc. The ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure + is defined as follows: + + \code +typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { +CUexternalMemoryHandleType type; +union { +int fd; +struct { +void *handle; +const void *name; +} win32; +const void *nvSciBufObject; +} handle; +unsigned long long size; +unsigned int flags; +} CUDA_EXTERNAL_MEMORY_HANDLE_DESC; + \endcode + + where ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type specifies the type + of handle being imported. 
::CUexternalMemoryHandleType is + defined as: + + \code +typedef enum CUexternalMemoryHandleType_enum { +CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, +CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 +} CUexternalMemoryHandleType; + \endcode + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid + file descriptor referencing a memory object. Ownership of + the file descriptor is transferred to the CUDA driver when the + handle is imported successfully. Performing any operations on the + file descriptor after it is imported results in undefined behavior. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one + of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + is not NULL, then it must represent a valid shared NT handle that + references a memory object. Ownership of this handle is + not transferred to CUDA after the import operation, so the + application must release the handle using the appropriate system + call. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + is not NULL, then it must point to a NULL-terminated array of + UTF-16 characters that refers to a memory object. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + be non-NULL and + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + must be NULL. The handle specified must be a globally shared KMT + handle. This handle does not hold a reference to the underlying + object, and thus will be invalid when all references to the + memory object are destroyed. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one + of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + is not NULL, then it must represent a valid shared NT handle that + is returned by ID3D12Device::CreateSharedHandle when referring to a + ID3D12Heap object. This handle holds a reference to the underlying + object. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + is not NULL, then it must point to a NULL-terminated array of + UTF-16 characters that refers to a ID3D12Heap object. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one + of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + is not NULL, then it must represent a valid shared NT handle that + is returned by ID3D12Device::CreateSharedHandle when referring to a + ID3D12Resource object. This handle holds a reference to the + underlying object. 
If + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + is not NULL, then it must point to a NULL-terminated array of + UTF-16 characters that refers to a ID3D12Resource object. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + represent a valid shared NT handle that is returned by + IDXGIResource1::CreateSharedHandle when referring to a + ID3D11Resource object. If + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + is not NULL, then it must point to a NULL-terminated array of + UTF-16 characters that refers to a ID3D11Resource object. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + represent a valid shared KMT handle that is returned by + IDXGIResource::GetSharedHandle when referring to a + ID3D11Resource object and + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + must be NULL. + + If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL + and reference a valid NvSciBuf object. + If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the + application must use ::cuWaitExternalSemaphoresAsync or ::cuSignalExternalSemaphoresAsync + as appropriate barriers to maintain coherence between CUDA and the other drivers. + See ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC and ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + for memory synchronization. + + + The size of the memory object must be specified in + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::size. + + Specifying the flag ::CUDA_EXTERNAL_MEMORY_DEDICATED in + ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::flags indicates that the + resource is a dedicated resource. The definition of what a + dedicated resource is outside the scope of this extension. + This flag must be set if ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type + is one of the following: + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + + \param extMem_out - Returned handle to an external memory object + \param memHandleDesc - Memory import handle descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OPERATING_SYSTEM + \notefnerr + + \note If the Vulkan memory imported into CUDA is mapped on the CPU then the + application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges + as well as appropriate Vulkan pipeline barriers to maintain coherence between + CPU and GPU. For more information on these APIs, please refer to "Synchronization + and Cache Control" chapter from Vulkan specification. + + \sa ::cuDestroyExternalMemory, + ::cuExternalMemoryGetMappedBuffer, + ::cuExternalMemoryGetMappedMipmappedArray*/ + fn cuImportExternalMemory( + extMem_out: *mut cuda_types::CUexternalMemory, + memHandleDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC, + ) -> cuda_types::CUresult; + /** \brief Maps a buffer onto an imported memory object + + Maps a buffer onto an imported memory object and returns a device + pointer in \p devPtr. + + The properties of the buffer being mapped must be described in + \p bufferDesc. 
The ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is + defined as follows: + + \code +typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { +unsigned long long offset; +unsigned long long size; +unsigned int flags; +} CUDA_EXTERNAL_MEMORY_BUFFER_DESC; + \endcode + + where ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::offset is the offset in + the memory object where the buffer's base address is. + ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::size is the size of the buffer. + ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::flags must be zero. + + The offset and size have to be suitably aligned to match the + requirements of the external API. Mapping two buffers whose ranges + overlap may or may not result in the same virtual address being + returned for the overlapped portion. In such cases, the application + must ensure that all accesses to that region from the GPU are + volatile. Otherwise writes made via one address are not guaranteed + to be visible via the other address, even if they're issued by the + same thread. It is recommended that applications map the combined + range instead of mapping separate buffers and then apply the + appropriate offsets to the returned pointer to derive the + individual buffers. + + The returned pointer \p devPtr must be freed using ::cuMemFree. + + \param devPtr - Returned device pointer to buffer + \param extMem - Handle to external memory object + \param bufferDesc - Buffer descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuImportExternalMemory, + ::cuDestroyExternalMemory, + ::cuExternalMemoryGetMappedMipmappedArray*/ + fn cuExternalMemoryGetMappedBuffer( + devPtr: *mut cuda_types::CUdeviceptr, + extMem: cuda_types::CUexternalMemory, + bufferDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC, + ) -> cuda_types::CUresult; + /** \brief Maps a CUDA mipmapped array onto an external memory object + + Maps a CUDA mipmapped array onto an external object and returns a + handle to it in \p mipmap. + + The properties of the CUDA mipmapped array being mapped must be + described in \p mipmapDesc. The structure + ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows: + + \code +typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { +unsigned long long offset; +CUDA_ARRAY3D_DESCRIPTOR arrayDesc; +unsigned int numLevels; +} CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; + \endcode + + where ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::offset is the + offset in the memory object where the base level of the mipmap + chain is. + ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc describes + the format, dimensions and type of the base level of the mipmap + chain. For further details on these parameters, please refer to the + documentation for ::cuMipmappedArrayCreate. Note that if the mipmapped + array is bound as a color target in the graphics API, then the flag + ::CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in + ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags. + ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels specifies + the total number of levels in the mipmap chain. + + If \p extMem was imported from a handle of type ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then + ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels must be equal to 1. + + The returned CUDA mipmapped array must be freed using ::cuMipmappedArrayDestroy. 
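+
+ A minimal sketch (illustrative only; extMem, width and height are assumed to
+ come from the caller, and error handling is omitted) of mapping a
+ single-level array and releasing it again:
+ \code
+CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC desc;
+memset(&desc, 0, sizeof(desc));
+desc.offset = 0;
+desc.arrayDesc.Width = width;
+desc.arrayDesc.Height = height;
+desc.arrayDesc.Depth = 0;
+desc.arrayDesc.Format = CU_AD_FORMAT_UNSIGNED_INT8;
+desc.arrayDesc.NumChannels = 4;
+desc.numLevels = 1;
+CUmipmappedArray mipmap;
+cuExternalMemoryGetMappedMipmappedArray(&mipmap, extMem, &desc);
+// ... use the mipmapped array ...
+cuMipmappedArrayDestroy(mipmap);
+ \endcode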
+ + \param mipmap - Returned CUDA mipmapped array + \param extMem - Handle to external memory object + \param mipmapDesc - CUDA array descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuImportExternalMemory, + ::cuDestroyExternalMemory, + ::cuExternalMemoryGetMappedBuffer*/ + fn cuExternalMemoryGetMappedMipmappedArray( + mipmap: *mut cuda_types::CUmipmappedArray, + extMem: cuda_types::CUexternalMemory, + mipmapDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC, + ) -> cuda_types::CUresult; + /** \brief Destroys an external memory object. + + Destroys the specified external memory object. Any existing buffers + and CUDA mipmapped arrays mapped onto this object must no longer be + used and must be explicitly freed using ::cuMemFree and + ::cuMipmappedArrayDestroy respectively. + + \param extMem - External memory object to be destroyed + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuImportExternalMemory, + ::cuExternalMemoryGetMappedBuffer, + ::cuExternalMemoryGetMappedMipmappedArray*/ + fn cuDestroyExternalMemory( + extMem: cuda_types::CUexternalMemory, + ) -> cuda_types::CUresult; + /** \brief Imports an external semaphore + + Imports an externally allocated synchronization object and returns + a handle to that in \p extSem_out. + + The properties of the handle being imported must be described in + \p semHandleDesc. The ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is + defined as follows: + + \code +typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { +CUexternalSemaphoreHandleType type; +union { +int fd; +struct { +void *handle; +const void *name; +} win32; +const void* NvSciSyncObj; +} handle; +unsigned int flags; +} CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; + \endcode + + where ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type specifies the type of + handle being imported. ::CUexternalSemaphoreHandleType is defined + as: + + \code +typedef enum CUexternalSemaphoreHandleType_enum { +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, +CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 +} CUexternalSemaphoreHandleType; + \endcode + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid + file descriptor referencing a synchronization object. Ownership of + the file descriptor is transferred to the CUDA driver when the + handle is imported successfully. Performing any operations on the + file descriptor after it is imported results in undefined behavior. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one + of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + NULL. 
If + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + is not NULL, then it must represent a valid shared NT handle that + references a synchronization object. Ownership of this handle is + not transferred to CUDA after the import operation, so the + application must release the handle using the appropriate system + call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + is not NULL, then it must name a valid synchronization object. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must + be non-NULL and + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + must be NULL. The handle specified must be a globally shared KMT + handle. This handle does not hold a reference to the underlying + object, and thus will be invalid when all references to the + synchronization object are destroyed. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one + of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + NULL. If + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + is not NULL, then it must represent a valid shared NT handle that + is returned by ID3D12Device::CreateSharedHandle when referring to a + ID3D12Fence object. This handle holds a reference to the underlying + object. If + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + is not NULL, then it must name a valid synchronization object that + refers to a valid ID3D12Fence object. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + represents a valid shared NT handle that is returned by + ID3D11Fence::CreateSharedHandle. If + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + is not NULL, then it must name a valid synchronization object that + refers to a valid ID3D11Fence object. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj + represents a valid NvSciSyncObj. + + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + represents a valid shared NT handle that + is returned by IDXGIResource1::CreateSharedHandle when referring to + a IDXGIKeyedMutex object. If + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + is not NULL, then it must name a valid synchronization object that + refers to a valid IDXGIKeyedMutex object. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + represents a valid shared KMT handle that + is returned by IDXGIResource::GetSharedHandle when referring to + a IDXGIKeyedMutex object and + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, then + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid + file descriptor referencing a synchronization object. Ownership of + the file descriptor is transferred to the CUDA driver when the + handle is imported successfully. 
Performing any operations on the + file descriptor after it is imported results in undefined behavior. + + If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, then exactly one + of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + NULL. If + ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + is not NULL, then it must represent a valid shared NT handle that + references a synchronization object. Ownership of this handle is + not transferred to CUDA after the import operation, so the + application must release the handle using the appropriate system + call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + is not NULL, then it must name a valid synchronization object. + + \param extSem_out - Returned handle to an external semaphore + \param semHandleDesc - Semaphore import handle descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OPERATING_SYSTEM + \notefnerr + + \sa ::cuDestroyExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuImportExternalSemaphore( + extSem_out: *mut cuda_types::CUexternalSemaphore, + semHandleDesc: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC, + ) -> cuda_types::CUresult; + /** \brief Signals a set of external semaphore objects + + Enqueues a signal operation on a set of externally allocated + semaphore object in the specified stream. The operations will be + executed when all prior operations in the stream complete. + + The exact semantics of signaling a semaphore depends on the type of + the object. + + If the semaphore object is any one of the following types: + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + then signaling the semaphore will set it to the signaled state. + + If the semaphore object is any one of the following types: + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + then the semaphore will be set to the value specified in + ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value. + + If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + this API sets ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence + to a value that can be used by subsequent waiters of the same NvSciSync object + to order operations with those currently submitted in \p stream. Such an update + will overwrite previous contents of + ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default, + signaling such an external semaphore object causes appropriate memory synchronization + operations to be performed over all external memory objects that are imported as + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses + made by other importers of the same set of NvSciBuf memory object(s) are coherent. + These operations can be skipped by specifying the flag + ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a + performance optimization when data coherency is not required. 
But specifying this + flag in scenarios where data coherency is required results in undefined behavior. + Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return + CUDA_ERROR_NOT_SUPPORTED. + NvSciSyncFence associated with semaphore object of the type + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC can be deterministic. For this the + NvSciSyncAttrList used to create the semaphore object must have value of + NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences + allow users to enqueue a wait over the semaphore object even before corresponding + signal is enqueued. For such a semaphore object, CUDA guarantees that each signal + operation will increment the fence value by '1'. Users are expected to track count + of signals enqueued on the semaphore object and insert waits accordingly. When such + a semaphore object is signaled from multiple streams, due to concurrent stream + execution, it is possible that the order in which the semaphore gets signaled is + indeterministic. This could lead to waiters of the semaphore getting unblocked + incorrectly. Users are expected to handle such situations, either by not using the + same semaphore object with deterministic fence support enabled in different streams + or by adding explicit dependency amongst such streams so that the semaphore is + signaled in order. + + If the semaphore object is any one of the following types: + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + then the keyed mutex will be released with the key specified in + ::CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key. + + \param extSemArray - Set of external semaphores to be signaled + \param paramsArray - Array of semaphore parameters + \param numExtSems - Number of semaphores to signal + \param stream - Stream to enqueue the signal operations in + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuImportExternalSemaphore, + ::cuDestroyExternalSemaphore, + ::cuWaitExternalSemaphoresAsync*/ + fn cuSignalExternalSemaphoresAsync_ptsz( + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Waits on a set of external semaphore objects + + Enqueues a wait operation on a set of externally allocated + semaphore object in the specified stream. The operations will be + executed when all prior operations in the stream complete. + + The exact semantics of waiting on a semaphore depends on the type + of the object. + + If the semaphore object is any one of the following types: + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + then waiting on the semaphore will wait until the semaphore reaches + the signaled state. The semaphore will then be reset to the + unsignaled state. Therefore for every signal operation, there can + only be one wait operation. 
+ + If the semaphore object is any one of the following types: + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + then waiting on the semaphore will wait until the value of the + semaphore is greater than or equal to + ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value. + + If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + then, waiting on the semaphore will wait until the + ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the + signaler of the NvSciSyncObj that was associated with this semaphore object. + By default, waiting on such an external semaphore object causes appropriate + memory synchronization operations to be performed over all external memory objects + that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that + any subsequent accesses made by other importers of the same set of NvSciBuf memory + object(s) are coherent. These operations can be skipped by specifying the flag + ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a + performance optimization when data coherency is not required. But specifying this + flag in scenarios where data coherency is required results in undefined behavior. + Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return + CUDA_ERROR_NOT_SUPPORTED. + + If the semaphore object is any one of the following types: + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + then the keyed mutex will be acquired when it is released with the key + specified in ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key + or until the timeout specified by + ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs + has lapsed. The timeout interval can either be a finite value + specified in milliseconds or an infinite value. In case an infinite + value is specified the timeout never elapses. The windows INFINITE + macro must be used to specify infinite timeout. + + \param extSemArray - External semaphores to be waited on + \param paramsArray - Array of semaphore parameters + \param numExtSems - Number of semaphores to wait on + \param stream - Stream to enqueue the wait operations in + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_TIMEOUT + \notefnerr + + \sa ::cuImportExternalSemaphore, + ::cuDestroyExternalSemaphore, + ::cuSignalExternalSemaphoresAsync*/ + fn cuWaitExternalSemaphoresAsync_ptsz( + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Destroys an external semaphore + + Destroys an external semaphore object and releases any references + to the underlying resource. Any outstanding signals or waits must + have completed before the semaphore is destroyed. 
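+
+ For example (illustrative only; hStream is assumed to be the stream the
+ signal/wait operations were enqueued in), outstanding work can be drained
+ before destruction:
+ \code
+cuStreamSynchronize(hStream);
+cuDestroyExternalSemaphore(extSem);
+ \endcode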
+ + \param extSem - External semaphore to be destroyed + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa ::cuImportExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuDestroyExternalSemaphore( + extSem: cuda_types::CUexternalSemaphore, + ) -> cuda_types::CUresult; + /** \brief Wait on a memory location + + Enqueues a synchronization of the stream on the given memory location. Work + ordered after the operation will block until the given condition on the + memory is satisfied. By default, the condition is to wait for + (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal. + Other condition types can be specified via \p flags. + + If the memory was registered via ::cuMemHostRegister(), the device pointer + should be obtained with ::cuMemHostGetDevicePointer(). This function cannot + be used with managed memory (::cuMemAllocManaged). + + Support for CU_STREAM_WAIT_VALUE_NOR can be queried with ::cuDeviceGetAttribute() and + ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + + \note + Warning: + Improper use of this API may deadlock the application. Synchronization + ordering established through this API is not visible to CUDA. CUDA tasks + that are (even indirectly) ordered by this API should also have that order + expressed with CUDA-visible dependencies such as events. This ensures that + the scheduler does not serialize them in an improper order. + + \param stream The stream to synchronize on the memory location. + \param addr The memory location to wait on. + \param value The value to compare with the memory location. + \param flags See ::CUstreamWaitValue_flags. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuStreamWaitValue64, + ::cuStreamWriteValue32, + ::cuStreamWriteValue64, + ::cuStreamBatchMemOp, + ::cuMemHostRegister, + ::cuStreamWaitEvent*/ + fn cuStreamWaitValue32_v2_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Wait on a memory location + + Enqueues a synchronization of the stream on the given memory location. Work + ordered after the operation will block until the given condition on the + memory is satisfied. By default, the condition is to wait for + (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal. + Other condition types can be specified via \p flags. + + If the memory was registered via ::cuMemHostRegister(), the device pointer + should be obtained with ::cuMemHostGetDevicePointer(). + + Support for this can be queried with ::cuDeviceGetAttribute() and + ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + + \note + Warning: + Improper use of this API may deadlock the application. Synchronization + ordering established through this API is not visible to CUDA. CUDA tasks + that are (even indirectly) ordered by this API should also have that order + expressed with CUDA-visible dependencies such as events. This ensures that + the scheduler does not serialize them in an improper order. + + \param stream The stream to synchronize on the memory location. + \param addr The memory location to wait on. + \param value The value to compare with the memory location. + \param flags See ::CUstreamWaitValue_flags. 
+ + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuStreamWaitValue32, + ::cuStreamWriteValue32, + ::cuStreamWriteValue64, + ::cuStreamBatchMemOp, + ::cuMemHostRegister, + ::cuStreamWaitEvent*/ + fn cuStreamWaitValue64_v2_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Write a value to memory + + Write a value to memory. + + If the memory was registered via ::cuMemHostRegister(), the device pointer + should be obtained with ::cuMemHostGetDevicePointer(). This function cannot + be used with managed memory (::cuMemAllocManaged). + + \param stream The stream to do the write in. + \param addr The device address to write to. + \param value The value to write. + \param flags See ::CUstreamWriteValue_flags. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuStreamWriteValue64, + ::cuStreamWaitValue32, + ::cuStreamWaitValue64, + ::cuStreamBatchMemOp, + ::cuMemHostRegister, + ::cuEventRecord*/ + fn cuStreamWriteValue32_v2_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Write a value to memory + + Write a value to memory. + + If the memory was registered via ::cuMemHostRegister(), the device pointer + should be obtained with ::cuMemHostGetDevicePointer(). + + Support for this can be queried with ::cuDeviceGetAttribute() and + ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + + \param stream The stream to do the write in. + \param addr The device address to write to. + \param value The value to write. + \param flags See ::CUstreamWriteValue_flags. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuStreamWriteValue32, + ::cuStreamWaitValue32, + ::cuStreamWaitValue64, + ::cuStreamBatchMemOp, + ::cuMemHostRegister, + ::cuEventRecord*/ + fn cuStreamWriteValue64_v2_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Batch operations to synchronize the stream via memory operations + + This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32(). + Batching operations may avoid some performance overhead in both the API call + and the device execution versus adding them to the stream in separate API + calls. The operations are enqueued in the order they appear in the array. + + See ::CUstreamBatchMemOpType for the full set of supported operations, and + ::cuStreamWaitValue32(), ::cuStreamWaitValue64(), ::cuStreamWriteValue32(), + and ::cuStreamWriteValue64() for details of specific operations. + + See related APIs for details on querying support for specific operations. + + \note + Warning: + Improper use of this API may deadlock the application. Synchronization + ordering established through this API is not visible to CUDA. CUDA tasks + that are (even indirectly) ordered by this API should also have that order + expressed with CUDA-visible dependencies such as events. This ensures that + the scheduler does not serialize them in an improper order. For more + information, see the Stream Memory Operations section in the programming + guide(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html). 
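+
+ As a rough sketch (assuming \p flagPtr and \p donePtr are device-accessible
+ addresses obtained as described for ::cuStreamWaitValue32 and
+ ::cuStreamWriteValue32; error handling omitted), a wait followed by a write
+ can be batched as:
+ \code
+ CUstreamBatchMemOpParams ops[2];
+ memset(ops, 0, sizeof(ops));
+ ops[0].operation = CU_STREAM_MEM_OP_WAIT_VALUE_32;   // wait until *flagPtr >= 1
+ ops[0].waitValue.address = flagPtr;
+ ops[0].waitValue.value = 1;
+ ops[0].waitValue.flags = CU_STREAM_WAIT_VALUE_GEQ;
+ ops[1].operation = CU_STREAM_MEM_OP_WRITE_VALUE_32;  // then publish *donePtr = 1
+ ops[1].writeValue.address = donePtr;
+ ops[1].writeValue.value = 1;
+ ops[1].writeValue.flags = CU_STREAM_WRITE_VALUE_DEFAULT;
+ cuStreamBatchMemOp(stream, 2, ops, 0);
+ \endcode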
+ + \param stream The stream to enqueue the operations in. + \param count The number of operations in the array. Must be less than 256. + \param paramArray The types and parameters of the individual operations. + \param flags Reserved for future expansion; must be 0. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa ::cuStreamWaitValue32, + ::cuStreamWaitValue64, + ::cuStreamWriteValue32, + ::cuStreamWriteValue64, + ::cuMemHostRegister*/ + fn cuStreamBatchMemOp_v2_ptsz( + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Returns information about a function + + Returns in \p *pi the integer value of the attribute \p attrib on the kernel + given by \p hfunc. The supported attributes are: + - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads + per block, beyond which a launch of the function would fail. This number + depends on both the function and the device on which the function is + currently loaded. + - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of + statically-allocated shared memory per block required by this function. + This does not include dynamically-allocated shared memory requested by + the user at runtime. + - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated + constant memory required by this function. + - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory + used by each thread of this function. + - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread + of this function. + - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for + which the function was compiled. This value is the major PTX version * 10 + + the minor PTX version, so a PTX version 1.3 function would return the + value 13. Note that this may return the undefined value of 0 for cubins + compiled prior to CUDA 3.0. + - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for + which the function was compiled. This value is the major binary + version * 10 + the minor binary version, so a binary version 1.3 function + would return the value 13. Note that this will return a value of 10 for + legacy cubins that do not have a properly-encoded binary architecture + version. + - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the function has + been compiled with user specified option "-Xptxas --dlcm=ca" set . + - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of + dynamically-allocated shared memory. + - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 + cache split ratio in percent of total shared memory. + - ::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the + kernel must launch with a valid cluster size specified. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + blocks. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + blocks. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + blocks. + - ::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether + the function can be launched with non-portable cluster size. 1 is allowed, + 0 is disallowed. A non-portable cluster size may only function on the + specific SKUs the program is tested on. 
The launch might fail if the + program is run on a different hardware platform. CUDA API provides + cudaOccupancyMaxActiveClusters to assist with checking whether the desired + size can be launched on the current device. A portable cluster size is + guaranteed to be functional on all compute capabilities higher than the + target compute capability. The portable cluster size for sm_90 is 8 blocks + per cluster. This value may increase for future compute capabilities. The + specific hardware unit may support higher cluster sizes that’s not + guaranteed to be portable. + - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + + With a few execeptions, function attributes may also be queried on unloaded + function handles returned from ::cuModuleEnumerateFunctions. + ::CUDA_ERROR_FUNCTION_NOT_LOADED is returned if the attribute requires a fully + loaded function but the function is not loaded. The loading state of a function + may be queried using ::cuFuncIsloaded. ::cuFuncLoad may be called to explicitly + load a function before querying the following attributes that require the function + to be loaded: + - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK + - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES + - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES + + \param pi - Returned attribute value + \param attrib - Attribute requested + \param hfunc - Function to query attribute of + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_FUNCTION_NOT_LOADED + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncSetCacheConfig, + ::cuLaunchKernel, + ::cudaFuncGetAttributes, + ::cudaFuncSetAttribute, + ::cuFuncIsLoaded, + ::cuFuncLoad, + ::cuKernelGetAttribute*/ + fn cuFuncGetAttribute( + pi: *mut ::core::ffi::c_int, + attrib: cuda_types::CUfunction_attribute, + hfunc: cuda_types::CUfunction, + ) -> cuda_types::CUresult; + /** \brief Sets information about a function + + This call sets the value of a specified attribute \p attrib on the kernel given + by \p hfunc to an integer value specified by \p val + This function returns CUDA_SUCCESS if the new value of the attribute could be + successfully set. If the set fails, this call will return an error. + Not all attributes can have values set. Attempting to set a value on a read-only + attribute will result in an error (CUDA_ERROR_INVALID_VALUE) + + Supported attributes for the cuFuncSetAttribute call are: + - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This maximum size in bytes of + dynamically-allocated shared memory. The value should contain the requested + maximum size of dynamically-allocated shared memory. The sum of this value and + the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the + device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. + The maximal size of requestable dynamic shared memory may differ by GPU + architecture. + - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 + cache and shared memory use the same hardware resources, this sets the shared memory + carveout preference, in percent of the total shared memory. + See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + This is only a hint, and the driver can choose a different ratio if required to execute the function. 
+ - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + blocks. The width, height, and depth values must either all be 0 or all be + positive. The validity of the cluster dimensions is checked at launch time. + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + blocks. The width, height, and depth values must either all be 0 or all be + positive. The validity of the cluster dimensions is checked at launch time. + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + blocks. The width, height, and depth values must either all be 0 or all be + positive. The validity of the cluster dimensions is checked at launch time. + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + + \param hfunc - Function to query attribute of + \param attrib - Attribute requested + \param value - The value to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncSetCacheConfig, + ::cuLaunchKernel, + ::cudaFuncGetAttributes, + ::cudaFuncSetAttribute, + ::cuKernelSetAttribute*/ + fn cuFuncSetAttribute( + hfunc: cuda_types::CUfunction, + attrib: cuda_types::CUfunction_attribute, + value: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Sets the preferred cache configuration for a device function + + On devices where the L1 cache and shared memory use the same hardware + resources, this sets through \p config the preferred cache configuration for + the device function \p hfunc. This is only a preference. The driver will use + the requested configuration if possible, but it is free to choose a different + configuration if required to execute \p hfunc. Any context-wide preference + set via ::cuCtxSetCacheConfig() will be overridden by this per-function + setting unless the per-function setting is ::CU_FUNC_CACHE_PREFER_NONE. In + that case, the current context-wide setting will be used. + + This setting does nothing on devices where the size of the L1 cache and + shared memory are fixed. + + Launching a kernel with a different preference than the most recent + preference setting may insert a device-side synchronization point. 
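+
+ A brief, non-normative sketch tying this together with ::cuFuncGetAttribute
+ and ::cuFuncSetAttribute above (assuming \p kernel is a loaded ::CUfunction
+ and \p dev is the current ::CUdevice; return codes unchecked):
+ \code
+ int maxOptIn = 0, maxThreads = 0;
+ cuDeviceGetAttribute(&maxOptIn,
+     CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, dev);
+ cuFuncGetAttribute(&maxThreads, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel);
+ // opt in to the largest dynamic shared memory carveout the device allows
+ cuFuncSetAttribute(kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, maxOptIn);
+ // and prefer shared memory over L1 for this kernel
+ cuFuncSetCacheConfig(kernel, CU_FUNC_CACHE_PREFER_SHARED);
+ \endcode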
+ + + The supported cache configurations are: + - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + + \param hfunc - Kernel to configure cache for + \param config - Requested cache configuration + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncGetAttribute, + ::cuLaunchKernel, + ::cudaFuncSetCacheConfig, + ::cuKernelSetCacheConfig*/ + fn cuFuncSetCacheConfig( + hfunc: cuda_types::CUfunction, + config: cuda_types::CUfunc_cache, + ) -> cuda_types::CUresult; + /** \brief Returns a module handle + + Returns in \p *hmod the handle of the module that function \p hfunc + is located in. The lifetime of the module corresponds to the lifetime of + the context it was loaded in or until the module is explicitly unloaded. + + The CUDA runtime manages its own modules loaded into the primary context. + If the handle returned by this API refers to a module loaded by the CUDA runtime, + calling ::cuModuleUnload() on that module will result in undefined behavior. + + \param hmod - Returned module handle + \param hfunc - Function to retrieve module for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_FOUND + \notefnerr +*/ + fn cuFuncGetModule( + hmod: *mut cuda_types::CUmodule, + hfunc: cuda_types::CUfunction, + ) -> cuda_types::CUresult; + /** \brief Returns the function name for a ::CUfunction handle + + Returns in \p **name the function name associated with the function handle \p hfunc . + The function name is returned as a null-terminated string. The returned name is only + valid when the function handle is valid. If the module is unloaded or reloaded, one + must call the API again to get the updated name. This API may return a mangled name if + the function is not declared as having C linkage. If either \p **name or \p hfunc + is NULL, ::CUDA_ERROR_INVALID_VALUE is returned. + + \param name - The returned name of the function + \param hfunc - The function handle to retrieve the name for + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr +*/ + fn cuFuncGetName( + name: *mut *const ::core::ffi::c_char, + hfunc: cuda_types::CUfunction, + ) -> cuda_types::CUresult; + /** \brief Returns the offset and size of a kernel parameter in the device-side parameter layout + + Queries the kernel parameter at \p paramIndex into \p func's list of parameters, and returns + in \p paramOffset and \p paramSize the offset and size, respectively, where the parameter + will reside in the device-side parameter layout. This information can be used to update kernel + node parameters from the device via ::cudaGraphKernelNodeSetParam() and + ::cudaGraphKernelNodeUpdatesApply(). \p paramIndex must be less than the number of parameters + that \p func takes. \p paramSize can be set to NULL if only the parameter offset is desired. 
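+
+ For instance (a hypothetical sketch; \p kernel is assumed to be a valid
+ handle whose first parameter exists), the name and device-side layout of
+ parameter 0 can be inspected as:
+ \code
+ const char *name = NULL;
+ size_t offset = 0, size = 0;
+ cuFuncGetName(&name, kernel);
+ cuFuncGetParamInfo(kernel, 0, &offset, &size);
+ printf("%s: param 0 at offset %zu, %zu bytes\n", name, offset, size);
+ \endcode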
+ + \param func - The function to query + \param paramIndex - The parameter index to query + \param paramOffset - Returns the offset into the device-side parameter layout at which the parameter resides + \param paramSize - Optionally returns the size of the parameter in the device-side parameter layout + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \notefnerr + + \sa ::cuKernelGetParamInfo*/ + fn cuFuncGetParamInfo( + func: cuda_types::CUfunction, + paramIndex: usize, + paramOffset: *mut usize, + paramSize: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns if the function is loaded + + Returns in \p state the loading state of \p function. + + \param state - returned loading state + \param function - the function to check + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuFuncLoad, + ::cuModuleEnumerateFunctions*/ + fn cuFuncIsLoaded( + state: *mut cuda_types::CUfunctionLoadingState, + function: cuda_types::CUfunction, + ) -> cuda_types::CUresult; + /** \brief Loads a function + + Finalizes function loading for \p function. Calling this API with a + fully loaded function has no effect. + + \param function - the function to load + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuModuleEnumerateFunctions, + ::cuFuncIsLoaded*/ + fn cuFuncLoad(function: cuda_types::CUfunction) -> cuda_types::CUresult; + /** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel + + Invokes the function ::CUfunction or the kernel ::CUkernel \p f + on a \p gridDimX x \p gridDimY x \p gridDimZ grid of blocks. + Each block contains \p blockDimX x \p blockDimY x + \p blockDimZ threads. + + \p sharedMemBytes sets the amount of dynamic shared memory that will be + available to each thread block. + + Kernel parameters to \p f can be specified in one of two ways: + + 1) Kernel parameters can be specified via \p kernelParams. If \p f + has N parameters, then \p kernelParams needs to be an array of N + pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] + must point to a region of memory from which the actual kernel + parameter will be copied. The number of kernel parameters and their + offsets and sizes do not need to be specified as that information is + retrieved directly from the kernel's image. + + 2) Kernel parameters can also be packaged by the application into + a single buffer that is passed in via the \p extra parameter. + This places the burden on the application of knowing each kernel + parameter's size and alignment/padding within the buffer. Here is + an example of using the \p extra parameter in this manner: + \code +size_t argBufferSize; +char argBuffer[256]; + +// populate argBuffer and argBufferSize + +void *config[] = { +CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer, +CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize, +CU_LAUNCH_PARAM_END +}; +status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config); + \endcode + + The \p extra parameter exists to allow ::cuLaunchKernel to take + additional less commonly used arguments. \p extra specifies a list of + names of extra settings and their corresponding values. Each extra + setting name is immediately followed by the corresponding value. The + list must be terminated with either NULL or ::CU_LAUNCH_PARAM_END. 
+ + - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + array; + - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + value in \p extra will be a pointer to a buffer containing all + the kernel parameters for launching kernel \p f; + - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + value in \p extra will be a pointer to a size_t containing the + size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER; + + The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel + parameters are specified with both \p kernelParams and \p extra + (i.e. both \p kernelParams and \p extra are non-NULL). + + Calling ::cuLaunchKernel() invalidates the persistent function state + set through the following deprecated APIs: + ::cuFuncSetBlockShape(), + ::cuFuncSetSharedSize(), + ::cuParamSetSize(), + ::cuParamSeti(), + ::cuParamSetf(), + ::cuParamSetv(). + + Note that to use ::cuLaunchKernel(), the kernel \p f must either have + been compiled with toolchain version 3.2 or later so that it will + contain kernel parameter information, or have no kernel parameters. + If either of these conditions is not met, then ::cuLaunchKernel() will + return ::CUDA_ERROR_INVALID_IMAGE. + + Note that the API can also be used to launch context-less kernel ::CUkernel + by querying the handle using ::cuLibraryGetKernel() and then passing it + to the API by casting to ::CUfunction. Here, the context to launch + the kernel on will either be taken from the specified stream \p hStream + or the current context in case of NULL stream. + + \param f - Function ::CUfunction or Kernel ::CUkernel to launch + \param gridDimX - Width of grid in blocks + \param gridDimY - Height of grid in blocks + \param gridDimZ - Depth of grid in blocks + \param blockDimX - X dimension of each thread block + \param blockDimY - Y dimension of each thread block + \param blockDimZ - Z dimension of each thread block + \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes + \param hStream - Stream identifier + \param kernelParams - Array of pointers to kernel parameters + \param extra - Extra options + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_IMAGE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_NOT_FOUND + \note_null_stream + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncSetCacheConfig, + ::cuFuncGetAttribute, + ::cudaLaunchKernel, + ::cuLibraryGetKernel, + ::cuKernelSetCacheConfig, + ::cuKernelGetAttribute, + ::cuKernelSetAttribute*/ + fn cuLaunchKernel_ptsz( + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel with launch-time configuration + + Invokes the function ::CUfunction or the kernel ::CUkernel \p f with the specified launch-time configuration + \p config. 
+ + The ::CUlaunchConfig structure is defined as: + + \code + typedef struct CUlaunchConfig_st { + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + CUlaunchAttribute *attrs; + unsigned int numAttrs; + } CUlaunchConfig; + \endcode + + where: + - ::CUlaunchConfig::gridDimX is the width of the grid in blocks. + - ::CUlaunchConfig::gridDimY is the height of the grid in blocks. + - ::CUlaunchConfig::gridDimZ is the depth of the grid in blocks. + - ::CUlaunchConfig::blockDimX is the X dimension of each thread block. + - ::CUlaunchConfig::blockDimX is the Y dimension of each thread block. + - ::CUlaunchConfig::blockDimZ is the Z dimension of each thread block. + - ::CUlaunchConfig::sharedMemBytes is the dynamic shared-memory size per + thread block in bytes. + - ::CUlaunchConfig::hStream is the handle to the stream to perform the launch + in. The CUDA context associated with this stream must match that associated + with function f. + - ::CUlaunchConfig::attrs is an array of ::CUlaunchConfig::numAttrs + continguous ::CUlaunchAttribute elements. The value of this pointer is not + considered if ::CUlaunchConfig::numAttrs is zero. However, in that case, it + is recommended to set the pointer to NULL. + - ::CUlaunchConfig::numAttrs is the number of attributes populating the + first ::CUlaunchConfig::numAttrs positions of the ::CUlaunchConfig::attrs + array. + + Launch-time configuration is specified by adding entries to + ::CUlaunchConfig::attrs. Each entry is an attribute ID and a corresponding + attribute value. + + The ::CUlaunchAttribute structure is defined as: + \code + typedef struct CUlaunchAttribute_st { + CUlaunchAttributeID id; + CUlaunchAttributeValue value; + } CUlaunchAttribute; + \endcode + where: + - ::CUlaunchAttribute::id is a unique enum identifying the attribute. + - ::CUlaunchAttribute::value is a union that hold the attribute value. + + An example of using the \p config parameter: + \code + CUlaunchAttribute coopAttr = {.id = CU_LAUNCH_ATTRIBUTE_COOPERATIVE, + .value = 1}; + CUlaunchConfig config = {... 
// set block and grid dimensions + .attrs = &coopAttr, + .numAttrs = 1}; + + cuLaunchKernelEx(&config, kernel, NULL, NULL); + \endcode + + The ::CUlaunchAttributeID enum is defined as: + \code + typedef enum CUlaunchAttributeID_enum { + CU_LAUNCH_ATTRIBUTE_IGNORE = 0, + CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, + CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2, + CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3, + CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4, + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5, + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6, + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7, + CU_LAUNCH_ATTRIBUTE_PRIORITY = 8, + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9, + CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = 10, + CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT = 12, + CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE = 13, + } CUlaunchAttributeID; + \endcode + + and the corresponding ::CUlaunchAttributeValue union as : + \code + typedef union CUlaunchAttributeValue_union { + CUaccessPolicyWindow accessPolicyWindow; + int cooperative; + CUsynchronizationPolicy syncPolicy; + struct { + unsigned int x; + unsigned int y; + unsigned int z; + } clusterDim; + CUclusterSchedulingPolicy clusterSchedulingPolicyPreference; + int programmaticStreamSerializationAllowed; + struct { + CUevent event; + int flags; + int triggerAtBlockStart; + } programmaticEvent; + int priority; + CUlaunchMemSyncDomainMap memSyncDomainMap; + CUlaunchMemSyncDomain memSyncDomain; + struct { + CUevent event; + int flags; + } launchCompletionEvent; + struct { + int deviceUpdatable; + CUgraphDeviceNode devNode; + } deviceUpdatableKernelNode; + } CUlaunchAttributeValue; + \endcode + + Setting ::CU_LAUNCH_ATTRIBUTE_COOPERATIVE to a non-zero value causes the + kernel launch to be a cooperative launch, with exactly the same usage and + semantics of ::cuLaunchCooperativeKernel. + + Setting ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION to a non-zero + values causes the kernel to use programmatic means to resolve its stream + dependency -- enabling the CUDA runtime to opportunistically allow the grid's + execution to overlap with the previous kernel in the stream, if that kernel + requests the overlap. + + ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT records an event along with the + kernel launch. Event recorded through this launch attribute is guaranteed to + only trigger after all block in the associated kernel trigger the event. A + block can trigger the event through PTX launchdep.release or CUDA builtin + function cudaTriggerProgrammaticLaunchCompletion(). A trigger can also be + inserted at the beginning of each block's execution if triggerAtBlockStart is + set to non-0. Note that dependents (including the CPU thread calling + cuEventSynchronize()) are not guaranteed to observe the release precisely + when it is released. For example, cuEventSynchronize() may only observe the + event trigger long after the associated kernel has completed. This recording + type is primarily meant for establishing programmatic dependency between + device tasks. The event supplied must not be an interprocess or interop + event. The event must disable timing (i.e. created with + ::CU_EVENT_DISABLE_TIMING flag set). + + ::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT records an event along with + the kernel launch. Nominally, the event is triggered once all blocks of the + kernel have begun execution. Currently this is a best effort. 
If a kernel B + has a launch completion dependency on a kernel A, B may wait until A is + complete. Alternatively, blocks of B may begin before all blocks of A have + begun, for example: + + - If B can claim execution resources unavaiable to A, for example if they + run on different GPUs. + - If B is a higher priority than A. + + Exercise caution if such an ordering inversion could lead to deadlock. The + event supplied must not be an interprocess or interop event. The event must + disable timing (i.e. must be created with the ::CU_EVENT_DISABLE_TIMING flag + set). + + Setting ::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE to 1 + on a captured launch causes the resulting kernel node to be device-updatable. + This attribute is specific to graphs, and passing it to a launch in a + non-capturing stream results in an error. Passing a value other than 0 or 1 is + not allowed. + + On success, a handle will be returned via + ::CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be passed + to the various device-side update functions to update the node's kernel parameters + from within another kernel. For more information on the types of device updates + that can be made, as well as the relevant limitations thereof, see + ::cudaGraphKernelNodeUpdatesApply. + + Kernel nodes which are device-updatable have additional restrictions compared to regular + kernel nodes. Firstly, device-updatable nodes cannot be removed from their graph via + ::cuGraphDestroyNode. Additionally, once opted-in to this functionality, a node cannot + opt out, and any attempt to set the attribute to 0 will result in an error. Graphs + containing one or more device-updatable node also do not allow multiple instantiation. + + + The effect of other attributes is consistent with their effect when set via + persistent APIs. + + See ::cuStreamSetAttribute for + - ::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW + - ::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY + + See ::cuFuncSetAttribute for + - ::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION + - ::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + + Kernel parameters to \p f can be specified in the same ways that they can be + using ::cuLaunchKernel. + + Note that the API can also be used to launch context-less kernel ::CUkernel + by querying the handle using ::cuLibraryGetKernel() and then passing it + to the API by casting to ::CUfunction. Here, the context to launch + the kernel on will either be taken from the specified stream ::CUlaunchConfig::hStream + or the current context in case of NULL stream. 
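+
+ Putting the pieces together, a rough sketch (assuming \p kernel, \p stream,
+ \p dA and \p n exist; the priority attribute is an arbitrary choice and
+ error handling is omitted):
+ \code
+ void *args[] = { &dA, &n };
+ CUlaunchAttribute prio;
+ memset(&prio, 0, sizeof(prio));
+ prio.id = CU_LAUNCH_ATTRIBUTE_PRIORITY;
+ prio.value.priority = 0;
+ CUlaunchConfig cfg;
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.gridDimX = (n + 255) / 256; cfg.gridDimY = 1; cfg.gridDimZ = 1;
+ cfg.blockDimX = 256;            cfg.blockDimY = 1; cfg.blockDimZ = 1;
+ cfg.sharedMemBytes = 0;
+ cfg.hStream = stream;
+ cfg.attrs = &prio;
+ cfg.numAttrs = 1;
+ cuLaunchKernelEx(&cfg, kernel, args, NULL);
+ \endcode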
+ + \param config - Config to launch + \param f - Function ::CUfunction or Kernel ::CUkernel to launch + \param kernelParams - Array of pointers to kernel parameters + \param extra - Extra options + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_IMAGE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_NOT_FOUND + \note_null_stream + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncSetCacheConfig, + ::cuFuncGetAttribute, + ::cudaLaunchKernel, + ::cudaLaunchKernelEx, + ::cuLibraryGetKernel, + ::cuKernelSetCacheConfig, + ::cuKernelGetAttribute, + ::cuKernelSetAttribute*/ + fn cuLaunchKernelEx_ptsz( + config: *const cuda_types::CUlaunchConfig, + f: cuda_types::CUfunction, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel where thread blocks + can cooperate and synchronize as they execute + + Invokes the function ::CUfunction or the kernel ::CUkernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ + grid of blocks. Each block contains \p blockDimX x \p blockDimY x + \p blockDimZ threads. + + Note that the API can also be used to launch context-less kernel ::CUkernel + by querying the handle using ::cuLibraryGetKernel() and then passing it + to the API by casting to ::CUfunction. Here, the context to launch + the kernel on will either be taken from the specified stream \p hStream + or the current context in case of NULL stream. + + \p sharedMemBytes sets the amount of dynamic shared memory that will be + available to each thread block. + + The device on which this kernel is invoked must have a non-zero value for + the device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH. + + The total number of blocks launched cannot exceed the maximum number of blocks per + multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or + ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. + + The kernel cannot make use of CUDA dynamic parallelism. + + Kernel parameters must be specified via \p kernelParams. If \p f + has N parameters, then \p kernelParams needs to be an array of N + pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] + must point to a region of memory from which the actual kernel + parameter will be copied. The number of kernel parameters and their + offsets and sizes do not need to be specified as that information is + retrieved directly from the kernel's image. + + Calling ::cuLaunchCooperativeKernel() sets persistent function state that is + the same as function state set through ::cuLaunchKernel API + + When the kernel \p f is launched via ::cuLaunchCooperativeKernel(), the previous + block shape, shared size and parameter info associated with \p f + is overwritten. + + Note that to use ::cuLaunchCooperativeKernel(), the kernel \p f must either have + been compiled with toolchain version 3.2 or later so that it will + contain kernel parameter information, or have no kernel parameters. 
+ If either of these conditions is not met, then ::cuLaunchCooperativeKernel() will + return ::CUDA_ERROR_INVALID_IMAGE. + + Note that the API can also be used to launch context-less kernel ::CUkernel + by querying the handle using ::cuLibraryGetKernel() and then passing it + to the API by casting to ::CUfunction. Here, the context to launch + the kernel on will either be taken from the specified stream \p hStream + or the current context in case of NULL stream. + + \param f - Function ::CUfunction or Kernel ::CUkernel to launch + \param gridDimX - Width of grid in blocks + \param gridDimY - Height of grid in blocks + \param gridDimZ - Depth of grid in blocks + \param blockDimX - X dimension of each thread block + \param blockDimY - Y dimension of each thread block + \param blockDimZ - Z dimension of each thread block + \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes + \param hStream - Stream identifier + \param kernelParams - Array of pointers to kernel parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_IMAGE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + ::CUDA_ERROR_NOT_FOUND + \note_null_stream + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncSetCacheConfig, + ::cuFuncGetAttribute, + ::cuLaunchCooperativeKernelMultiDevice, + ::cudaLaunchCooperativeKernel, + ::cuLibraryGetKernel, + ::cuKernelSetCacheConfig, + ::cuKernelGetAttribute, + ::cuKernelSetAttribute*/ + fn cuLaunchCooperativeKernel_ptsz( + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute + + \deprecated This function is deprecated as of CUDA 11.3. + + Invokes kernels as specified in the \p launchParamsList array where each element + of the array specifies all the parameters required to perform a single kernel launch. + These kernels can cooperate and synchronize as they execute. The size of the array is + specified by \p numDevices. + + No two kernels can be launched on the same device. All the devices targeted by this + multi-device launch must be identical. All devices must have a non-zero value for the + device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH. + + All kernels launched must be identical with respect to the compiled code. Note that + any __device__, __constant__ or __managed__ variables present in the module that owns + the kernel launched on each device, are independently instantiated on every device. + It is the application's responsibility to ensure these variables are initialized and + used appropriately. + + The size of the grids as specified in blocks, the size of the blocks themselves + and the amount of shared memory used by each thread block must also match across + all launched kernels. 
+ + The streams used to launch these kernels must have been created via either ::cuStreamCreate + or ::cuStreamCreateWithPriority. The NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD + cannot be used. + + The total number of blocks launched per kernel cannot exceed the maximum number of blocks + per multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or + ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the + total number of blocks launched per device has to match across all devices, the maximum + number of blocks that can be launched per device will be limited by the device with the + least number of multiprocessors. + + The kernels cannot make use of CUDA dynamic parallelism. + + The ::CUDA_LAUNCH_PARAMS structure is defined as: + \code +typedef struct CUDA_LAUNCH_PARAMS_st +{ +CUfunction function; +unsigned int gridDimX; +unsigned int gridDimY; +unsigned int gridDimZ; +unsigned int blockDimX; +unsigned int blockDimY; +unsigned int blockDimZ; +unsigned int sharedMemBytes; +CUstream hStream; +void **kernelParams; +} CUDA_LAUNCH_PARAMS; + \endcode + where: + - ::CUDA_LAUNCH_PARAMS::function specifies the kernel to be launched. All functions must + be identical with respect to the compiled code. + Note that you can also specify context-less kernel ::CUkernel by querying the handle + using ::cuLibraryGetKernel() and then casting to ::CUfunction. In this case, the context to + launch the kernel on be taken from the specified stream ::CUDA_LAUNCH_PARAMS::hStream. + - ::CUDA_LAUNCH_PARAMS::gridDimX is the width of the grid in blocks. This must match across + all kernels launched. + - ::CUDA_LAUNCH_PARAMS::gridDimY is the height of the grid in blocks. This must match across + all kernels launched. + - ::CUDA_LAUNCH_PARAMS::gridDimZ is the depth of the grid in blocks. This must match across + all kernels launched. + - ::CUDA_LAUNCH_PARAMS::blockDimX is the X dimension of each thread block. This must match across + all kernels launched. + - ::CUDA_LAUNCH_PARAMS::blockDimX is the Y dimension of each thread block. This must match across + all kernels launched. + - ::CUDA_LAUNCH_PARAMS::blockDimZ is the Z dimension of each thread block. This must match across + all kernels launched. + - ::CUDA_LAUNCH_PARAMS::sharedMemBytes is the dynamic shared-memory size per thread block in bytes. + This must match across all kernels launched. + - ::CUDA_LAUNCH_PARAMS::hStream is the handle to the stream to perform the launch in. This cannot + be the NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD. The CUDA context associated + with this stream must match that associated with ::CUDA_LAUNCH_PARAMS::function. + - ::CUDA_LAUNCH_PARAMS::kernelParams is an array of pointers to kernel parameters. If + ::CUDA_LAUNCH_PARAMS::function has N parameters, then ::CUDA_LAUNCH_PARAMS::kernelParams + needs to be an array of N pointers. Each of ::CUDA_LAUNCH_PARAMS::kernelParams[0] through + ::CUDA_LAUNCH_PARAMS::kernelParams[N-1] must point to a region of memory from which the actual + kernel parameter will be copied. The number of kernel parameters and their offsets and sizes + do not need to be specified as that information is retrieved directly from the kernel's image. + + By default, the kernel won't begin execution on any GPU until all prior work in all the specified + streams has completed. 
This behavior can be overridden by specifying the flag + ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. When this flag is specified, each kernel + will only wait for prior work in the stream corresponding to that GPU to complete before it begins + execution. + + Similarly, by default, any subsequent work pushed in any of the specified streams will not begin + execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying + the flag ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified, + any subsequent work pushed in any of the specified streams will only wait for the kernel launched + on the GPU corresponding to that stream to complete before it begins execution. + + Calling ::cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is + the same as function state set through ::cuLaunchKernel API when called individually for each + element in \p launchParamsList. + + When kernels are launched via ::cuLaunchCooperativeKernelMultiDevice(), the previous + block shape, shared size and parameter info associated with each ::CUDA_LAUNCH_PARAMS::function + in \p launchParamsList is overwritten. + + Note that to use ::cuLaunchCooperativeKernelMultiDevice(), the kernels must either have + been compiled with toolchain version 3.2 or later so that it will + contain kernel parameter information, or have no kernel parameters. + If either of these conditions is not met, then ::cuLaunchCooperativeKernelMultiDevice() will + return ::CUDA_ERROR_INVALID_IMAGE. + + \param launchParamsList - List of launch parameters, one per device + \param numDevices - Size of the \p launchParamsList array + \param flags - Flags to control launch behavior + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_IMAGE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + \note_null_stream + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuFuncSetCacheConfig, + ::cuFuncGetAttribute, + ::cuLaunchCooperativeKernel, + ::cudaLaunchCooperativeKernelMultiDevice*/ + fn cuLaunchCooperativeKernelMultiDevice( + launchParamsList: *mut cuda_types::CUDA_LAUNCH_PARAMS, + numDevices: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Enqueues a host function call in a stream + + Enqueues a host function to run in a stream. The function will be called + after currently enqueued work and will block work added after it. + + The host function must not make any CUDA API calls. Attempting to use a + CUDA API may result in ::CUDA_ERROR_NOT_PERMITTED, but this is not required. + The host function must not perform any synchronization that may depend on + outstanding CUDA work not mandated to run earlier. Host functions without a + mandated order (such as in independent streams) execute in undefined order + and may be serialized. + + For the purposes of Unified Memory, execution makes a number of guarantees: +
+
+ - The stream is considered idle for the duration of the function's
+ execution. Thus, for example, the function may always use memory attached
+ to the stream it was enqueued in.
+ - The start of execution of the function has the same effect as
+ synchronizing an event recorded in the same stream immediately prior to
+ the function. It thus synchronizes streams which have been "joined"
+ prior to the function.
+ - Adding device work to any stream does not have the effect of making
+ the stream active until all preceding host functions and stream callbacks
+ have executed. Thus, for example, a function might use global attached
+ memory even if work has been added to another stream, if the work has
+ been ordered behind the function call with an event.
+ - Completion of the function does not cause a stream to become
+ active except as described above. The stream will remain idle
+ if no device work follows the function, and will remain idle across
+ consecutive host functions or stream callbacks without device work in
+ between. Thus, for example, stream synchronization can be done by
+ signaling from a host function at the end of the stream.
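+
+ An informal sketch (the \p done flag and its lifetime are assumptions of this
+ example; the callback performs host work only, per the rules above):
+ \code
+ void CUDA_CB markDone(void *userData) {
+     *(volatile int *)userData = 1;   // plain host work; no CUDA API calls here
+ }
+
+ static volatile int done = 0;
+ cuLaunchHostFunc(stream, markDone, (void *)&done);
+ \endcode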
+ + Note that, in contrast to ::cuStreamAddCallback, the function will not be + called in the event of an error in the CUDA context. + + \param hStream - Stream to enqueue function call in + \param fn - The function to call once preceding stream operations are complete + \param userData - User-specified data to be passed to the function + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_null_stream + \notefnerr + + \sa ::cuStreamCreate, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamWaitEvent, + ::cuStreamDestroy, + ::cuMemAllocManaged, + ::cuStreamAttachMemAsync, + ::cuStreamAddCallback*/ + fn cuLaunchHostFunc_ptsz( + hStream: cuda_types::CUstream, + fn_: cuda_types::CUhostFn, + userData: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Sets the block-dimensions for the function + + \deprecated + + Specifies the \p x, \p y, and \p z dimensions of the thread blocks that are + created when the kernel given by \p hfunc is launched. + + \param hfunc - Kernel to specify dimensions of + \param x - X dimension + \param y - Y dimension + \param z - Z dimension + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuFuncSetSharedSize, + ::cuFuncSetCacheConfig, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSeti, + ::cuParamSetf, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuFuncSetBlockShape( + hfunc: cuda_types::CUfunction, + x: ::core::ffi::c_int, + y: ::core::ffi::c_int, + z: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Sets the dynamic shared-memory size for the function + + \deprecated + + Sets through \p bytes the amount of dynamic shared memory that will be + available to each thread block when the kernel given by \p hfunc is launched. + + \param hfunc - Kernel to specify dynamic shared-memory size for + \param bytes - Dynamic shared-memory size per thread in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetCacheConfig, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSeti, + ::cuParamSetf, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuFuncSetSharedSize( + hfunc: cuda_types::CUfunction, + bytes: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Sets the parameter size for the function + + \deprecated + + Sets through \p numbytes the total size in bytes needed by the function + parameters of the kernel corresponding to \p hfunc. 
+ + \param hfunc - Kernel to set parameter size for + \param numbytes - Size of parameter list in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetf, + ::cuParamSeti, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuParamSetSize( + hfunc: cuda_types::CUfunction, + numbytes: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Adds an integer parameter to the function's argument list + + \deprecated + + Sets an integer parameter that will be specified the next time the + kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. + + \param hfunc - Kernel to add parameter to + \param offset - Offset to add parameter to argument list + \param value - Value of parameter + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSetf, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuParamSeti( + hfunc: cuda_types::CUfunction, + offset: ::core::ffi::c_int, + value: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Adds a floating-point parameter to the function's argument list + + \deprecated + + Sets a floating-point parameter that will be specified the next time the + kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. + + \param hfunc - Kernel to add parameter to + \param offset - Offset to add parameter to argument list + \param value - Value of parameter + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSeti, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuParamSetf( + hfunc: cuda_types::CUfunction, + offset: ::core::ffi::c_int, + value: f32, + ) -> cuda_types::CUresult; + /** \brief Adds arbitrary data to the function's argument list + + \deprecated + + Copies an arbitrary amount of data (specified in \p numbytes) from \p ptr + into the parameter space of the kernel corresponding to \p hfunc. \p offset + is a byte offset. + + \param hfunc - Kernel to add data to + \param offset - Offset to add data to argument list + \param ptr - Pointer to arbitrary data + \param numbytes - Size of data to copy in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSetf, + ::cuParamSeti, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuParamSetv( + hfunc: cuda_types::CUfunction, + offset: ::core::ffi::c_int, + ptr: *mut ::core::ffi::c_void, + numbytes: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Launches a CUDA function + + \deprecated + + Invokes the kernel \p f on a 1 x 1 x 1 grid of blocks. 
The block + contains the number of threads specified by a previous call to + ::cuFuncSetBlockShape(). + + The block shape, dynamic shared memory size, and parameter information + must be set using + ::cuFuncSetBlockShape(), + ::cuFuncSetSharedSize(), + ::cuParamSetSize(), + ::cuParamSeti(), + ::cuParamSetf(), and + ::cuParamSetv() + prior to calling this function. + + Launching a function via ::cuLaunchKernel() invalidates the function's + block shape, dynamic shared memory size, and parameter information. After + launching via cuLaunchKernel, this state must be re-initialized prior to + calling this function. Failure to do so results in undefined behavior. + + \param f - Kernel to launch + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSetf, + ::cuParamSeti, + ::cuParamSetv, + ::cuLaunchGrid, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuLaunch(f: cuda_types::CUfunction) -> cuda_types::CUresult; + /** \brief Launches a CUDA function + + \deprecated + + Invokes the kernel \p f on a \p grid_width x \p grid_height grid of + blocks. Each block contains the number of threads specified by a previous + call to ::cuFuncSetBlockShape(). + + The block shape, dynamic shared memory size, and parameter information + must be set using + ::cuFuncSetBlockShape(), + ::cuFuncSetSharedSize(), + ::cuParamSetSize(), + ::cuParamSeti(), + ::cuParamSetf(), and + ::cuParamSetv() + prior to calling this function. + + Launching a function via ::cuLaunchKernel() invalidates the function's + block shape, dynamic shared memory size, and parameter information. After + launching via cuLaunchKernel, this state must be re-initialized prior to + calling this function. Failure to do so results in undefined behavior. + + \param f - Kernel to launch + \param grid_width - Width of grid in blocks + \param grid_height - Height of grid in blocks + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSetf, + ::cuParamSeti, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGridAsync, + ::cuLaunchKernel*/ + fn cuLaunchGrid( + f: cuda_types::CUfunction, + grid_width: ::core::ffi::c_int, + grid_height: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Launches a CUDA function + + \deprecated + + Invokes the kernel \p f on a \p grid_width x \p grid_height grid of + blocks. Each block contains the number of threads specified by a previous + call to ::cuFuncSetBlockShape(). + + The block shape, dynamic shared memory size, and parameter information + must be set using + ::cuFuncSetBlockShape(), + ::cuFuncSetSharedSize(), + ::cuParamSetSize(), + ::cuParamSeti(), + ::cuParamSetf(), and + ::cuParamSetv() + prior to calling this function. 
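+
+ For reference only, since this whole family is deprecated (a hedged sketch
+ assuming \p f takes a single int at offset 0 and \p gridWidth and \p hStream
+ are valid), the legacy sequence looks like:
+ \code
+ cuFuncSetBlockShape(f, 256, 1, 1);
+ cuFuncSetSharedSize(f, 0);
+ cuParamSetSize(f, sizeof(int));
+ cuParamSeti(f, 0, 42);
+ cuLaunchGridAsync(f, gridWidth, 1, hStream);
+ \endcode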
+ + Launching a function via ::cuLaunchKernel() invalidates the function's + block shape, dynamic shared memory size, and parameter information. After + launching via cuLaunchKernel, this state must be re-initialized prior to + calling this function. Failure to do so results in undefined behavior. + + \param f - Kernel to launch + \param grid_width - Width of grid in blocks + \param grid_height - Height of grid in blocks + \param hStream - Stream identifier + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_LAUNCH_FAILED, + ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + + \note In certain cases where cubins are created with no ABI (i.e., using \p ptxas \p --abi-compile \p no), + this function may serialize kernel launches. The CUDA driver retains asynchronous behavior by + growing the per-thread stack as needed per launch and not shrinking it afterwards. + + \note_null_stream + \notefnerr + + \sa ::cuFuncSetBlockShape, + ::cuFuncSetSharedSize, + ::cuFuncGetAttribute, + ::cuParamSetSize, + ::cuParamSetf, + ::cuParamSeti, + ::cuParamSetv, + ::cuLaunch, + ::cuLaunchGrid, + ::cuLaunchKernel*/ + fn cuLaunchGridAsync( + f: cuda_types::CUfunction, + grid_width: ::core::ffi::c_int, + grid_height: ::core::ffi::c_int, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Adds a texture-reference to the function's argument list + + \deprecated + + Makes the CUDA array or linear memory bound to the texture reference + \p hTexRef available to a device program as a texture. In this version of + CUDA, the texture-reference must be obtained via ::cuModuleGetTexRef() and + the \p texunit parameter must be set to ::CU_PARAM_TR_DEFAULT. + + \param hfunc - Kernel to add texture-reference to + \param texunit - Texture unit (must be ::CU_PARAM_TR_DEFAULT) + \param hTexRef - Texture-reference to add to argument list + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr*/ + fn cuParamSetTexRef( + hfunc: cuda_types::CUfunction, + texunit: ::core::ffi::c_int, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Sets the shared memory configuration for a device function. + + \deprecated + + On devices with configurable shared memory banks, this function will + force all subsequent launches of the specified device function to have + the given shared memory bank size configuration. On any given launch of the + function, the shared memory configuration of the device will be temporarily + changed if needed to suit the function's preferred configuration. Changes in + shared memory configuration between subsequent launches of functions, + may introduce a device side synchronization point. + + Any per-function setting of shared memory bank size set via + ::cuFuncSetSharedMemConfig will override the context wide setting set with + ::cuCtxSetSharedMemConfig. + + Changing the shared memory bank size will not increase shared memory usage + or affect occupancy of kernels, but may have major effects on performance. + Larger bank sizes will allow for greater potential bandwidth to shared memory, + but will change what kinds of accesses to shared memory will result in bank + conflicts. 
+ + This function will do nothing on devices with fixed shared memory bank size. + + The supported bank configurations are: + - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: use the context's shared memory + configuration when launching this function. + - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to + be natively four bytes when launching this function. + - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to + be natively eight bytes when launching this function. + + \param hfunc - kernel to be given a shared memory config + \param config - requested shared memory configuration + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa ::cuCtxGetCacheConfig, + ::cuCtxSetCacheConfig, + ::cuCtxGetSharedMemConfig, + ::cuCtxSetSharedMemConfig, + ::cuFuncGetAttribute, + ::cuLaunchKernel, + ::cudaFuncSetSharedMemConfig*/ + fn cuFuncSetSharedMemConfig( + hfunc: cuda_types::CUfunction, + config: cuda_types::CUsharedconfig, + ) -> cuda_types::CUresult; + /** \brief Creates a graph + + Creates an empty graph, which is returned via \p phGraph. + + \param phGraph - Returns newly created graph + \param flags - Graph creation flags, must be 0 + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode, + ::cuGraphInstantiate, + ::cuGraphDestroy, + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphGetEdges, + ::cuGraphClone*/ + fn cuGraphCreate( + phGraph: *mut cuda_types::CUgraph, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Creates a kernel execution node and adds it to a graph + + Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and arguments specified in \p nodeParams. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + The CUDA_KERNEL_NODE_PARAMS structure is defined as: + + \code + typedef struct CUDA_KERNEL_NODE_PARAMS_st { + CUfunction func; + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + void **kernelParams; + void **extra; + CUkernel kern; + CUcontext ctx; + } CUDA_KERNEL_NODE_PARAMS; + \endcode + + When the graph is launched, the node will invoke kernel \p func on a (\p gridDimX x + \p gridDimY x \p gridDimZ) grid of blocks. Each block contains + (\p blockDimX x \p blockDimY x \p blockDimZ) threads. + + \p sharedMemBytes sets the amount of dynamic shared memory that will be + available to each thread block. + + Kernel parameters to \p func can be specified in one of two ways: + + 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N + parameters, then \p kernelParams needs to be an array of N pointers. Each pointer, + from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual + parameter will be copied. 
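// Hypothetical usage sketch: forcing eight-byte shared memory banks for a single
// kernel via cuFuncSetSharedMemConfig. It assumes the bank-size values named above
// are exposed as associated constants on cuda_types::CUsharedconfig, matching the
// newtype enum style used elsewhere in these bindings.
fn prefer_wide_banks(hfunc: cuda_types::CUfunction) -> cuda_types::CUresult {
    unsafe {
        cuFuncSetSharedMemConfig(
            hfunc,
            cuda_types::CUsharedconfig::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE,
        )
    }
}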
The number of kernel parameters and their offsets and sizes do not need + to be specified as that information is retrieved directly from the kernel's image. + + 2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single + buffer that is passed in via \p extra. This places the burden on the application of knowing each + kernel parameter's size and alignment/padding within the buffer. The \p extra parameter exists + to allow this function to take additional less commonly used arguments. \p extra specifies + a list of names of extra settings and their corresponding values. Each extra setting name is + immediately followed by the corresponding value. The list must be terminated with either NULL or + CU_LAUNCH_PARAM_END. + + - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + array; + - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + value in \p extra will be a pointer to a buffer + containing all the kernel parameters for launching kernel + \p func; + - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + value in \p extra will be a pointer to a size_t + containing the size of the buffer specified with + ::CU_LAUNCH_PARAM_BUFFER_POINTER; + + The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both + \p kernelParams and \p extra (i.e. both \p kernelParams and \p extra are non-NULL). + ::CUDA_ERROR_INVALID_VALUE will be returned if \p extra is used for a cooperative kernel. + + The \p kernelParams or \p extra array, as well as the argument values it points to, + are copied during this call. + + \note Kernels launched using graphs must not use texture and surface references. Reading or + writing through any texture or surface reference is undefined behavior. + This restriction does not apply to texture and surface objects. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Parameters for the GPU execution node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuLaunchKernel, + ::cuLaunchCooperativeKernel, + ::cuGraphKernelNodeGetParams, + ::cuGraphKernelNodeSetParams, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddKernelNode_v2( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Returns a kernel node's parameters + + Returns the parameters of kernel node \p hNode in \p nodeParams. + The \p kernelParams or \p extra array returned in \p nodeParams, + as well as the argument values it points to, are owned by the node. + This memory remains valid until the node is destroyed or its + parameters are modified, and should not be modified + directly. Use ::cuGraphKernelNodeSetParams to update the + parameters of this node. + + The params will contain either \p kernelParams or \p extra, + according to which of these was most recently set on the node. 
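// Illustrative sketch (not part of the generated bindings): creating an empty graph
// and adding one kernel node as its root. Field names follow the
// CUDA_KERNEL_NODE_PARAMS layout quoted above; `kernel_params` is a caller-built
// array of pointers to the kernel's arguments, and error handling is omitted.
unsafe fn build_single_kernel_graph(
    func: cuda_types::CUfunction,
    kernel_params: *mut *mut ::core::ffi::c_void,
) -> (cuda_types::CUgraph, cuda_types::CUgraphNode) {
    let mut graph: cuda_types::CUgraph = std::mem::zeroed();
    let _ = cuGraphCreate(&mut graph, 0);
    let mut params: cuda_types::CUDA_KERNEL_NODE_PARAMS = std::mem::zeroed();
    params.func = func;
    params.gridDimX = 128;
    params.gridDimY = 1;
    params.gridDimZ = 1;
    params.blockDimX = 256;
    params.blockDimY = 1;
    params.blockDimZ = 1;
    params.sharedMemBytes = 0;
    params.kernelParams = kernel_params;
    let mut node: cuda_types::CUgraphNode = std::mem::zeroed();
    // numDependencies == 0, so the node becomes a root of the graph.
    let _ = cuGraphAddKernelNode_v2(&mut node, graph, std::ptr::null(), 0, &params);
    (graph, node)
}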
+ + \param hNode - Node to get the parameters for + \param nodeParams - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuLaunchKernel, + ::cuGraphAddKernelNode, + ::cuGraphKernelNodeSetParams*/ + fn cuGraphKernelNodeGetParams_v2( + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets a kernel node's parameters + + Sets the parameters of kernel node \p hNode to \p nodeParams. + + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuLaunchKernel, + ::cuGraphAddKernelNode, + ::cuGraphKernelNodeGetParams*/ + fn cuGraphKernelNodeSetParams_v2( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates a memcpy node and adds it to a graph + + Creates a new memcpy node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + When the graph is launched, the node will perform the memcpy described by \p copyParams. + See ::cuMemcpy3D() for a description of the structure and its restrictions. + + Memcpy nodes have some additional restrictions with regards to managed memory, if the + system contains at least one device which has a zero value for the device attribute + ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer + to managed memory, then using the memory type ::CU_MEMORYTYPE_UNIFIED is disallowed + for those operand(s). The managed memory will be treated as residing on either the + host or the device, depending on which memory type is specified. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param copyParams - Parameters for the memory copy + \param ctx - Context on which to run the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuMemcpy3D, + ::cuGraphMemcpyNodeGetParams, + ::cuGraphMemcpyNodeSetParams, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddMemcpyNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + copyParams: *const cuda_types::CUDA_MEMCPY3D, + ctx: cuda_types::CUcontext, + ) -> cuda_types::CUresult; + /** \brief Returns a memcpy node's parameters + + Returns the parameters of memcpy node \p hNode in \p nodeParams. 
+ + \param hNode - Node to get the parameters for + \param nodeParams - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuMemcpy3D, + ::cuGraphAddMemcpyNode, + ::cuGraphMemcpyNodeSetParams*/ + fn cuGraphMemcpyNodeGetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_MEMCPY3D, + ) -> cuda_types::CUresult; + /** \brief Sets a memcpy node's parameters + + Sets the parameters of memcpy node \p hNode to \p nodeParams. + + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuMemcpy3D, + ::cuGraphAddMemcpyNode, + ::cuGraphMemcpyNodeGetParams*/ + fn cuGraphMemcpyNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_MEMCPY3D, + ) -> cuda_types::CUresult; + /** \brief Creates a memset node and adds it to a graph + + Creates a new memset node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + The element size must be 1, 2, or 4 bytes. + When the graph is launched, the node will perform the memset described by \p memsetParams. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param memsetParams - Parameters for the memory set + \param ctx - Context on which to run the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_CONTEXT + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuMemsetD2D32, + ::cuGraphMemsetNodeGetParams, + ::cuGraphMemsetNodeSetParams, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemcpyNode*/ + fn cuGraphAddMemsetNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS, + ctx: cuda_types::CUcontext, + ) -> cuda_types::CUresult; + /** \brief Returns a memset node's parameters + + Returns the parameters of memset node \p hNode in \p nodeParams. + + \param hNode - Node to get the parameters for + \param nodeParams - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuMemsetD2D32, + ::cuGraphAddMemsetNode, + ::cuGraphMemsetNodeSetParams*/ + fn cuGraphMemsetNodeGetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_MEMSET_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets a memset node's parameters + + Sets the parameters of memset node \p hNode to \p nodeParams. 
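// Illustrative sketch (not part of the generated bindings): adding a memset node
// that zeroes a buffer of 4-byte elements. The CUDA_MEMSET_NODE_PARAMS field names
// (dst, value, elementSize, width, height, pitch) are assumed to match the driver
// API struct; as noted above, the element size must be 1, 2 or 4 bytes.
unsafe fn add_zeroing_memset_node(
    graph: cuda_types::CUgraph,
    ctx: cuda_types::CUcontext,
    dst: cuda_types::CUdeviceptr,
    num_elements: usize,
) -> cuda_types::CUgraphNode {
    let mut params: cuda_types::CUDA_MEMSET_NODE_PARAMS = std::mem::zeroed();
    params.dst = dst;
    params.value = 0;
    params.elementSize = 4;
    params.width = num_elements; // elements per row
    params.height = 1;           // single row; pitch is left zeroed
    let mut node: cuda_types::CUgraphNode = std::mem::zeroed();
    let _ = cuGraphAddMemsetNode(&mut node, graph, std::ptr::null(), 0, &params, ctx);
    node
}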
+ + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuMemsetD2D32, + ::cuGraphAddMemsetNode, + ::cuGraphMemsetNodeGetParams*/ + fn cuGraphMemsetNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates a host execution node and adds it to a graph + + Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and arguments specified in \p nodeParams. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + When the graph is launched, the node will invoke the specified CPU function. + Host nodes are not supported under MPS with pre-Volta GPUs. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Parameters for the host node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuLaunchHostFunc, + ::cuGraphHostNodeGetParams, + ::cuGraphHostNodeSetParams, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddHostNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Returns a host node's parameters + + Returns the parameters of host node \p hNode in \p nodeParams. + + \param hNode - Node to get the parameters for + \param nodeParams - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuLaunchHostFunc, + ::cuGraphAddHostNode, + ::cuGraphHostNodeSetParams*/ + fn cuGraphHostNodeGetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_HOST_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets a host node's parameters + + Sets the parameters of host node \p hNode to \p nodeParams. 
+ + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuLaunchHostFunc, + ::cuGraphAddHostNode, + ::cuGraphHostNodeGetParams*/ + fn cuGraphHostNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates a child graph node and adds it to a graph + + Creates a new node which executes an embedded graph, and adds it to \p hGraph with + \p numDependencies dependencies specified via \p dependencies. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + If \p hGraph contains allocation or free nodes, this call will return an error. + + The node executes an embedded child graph. The child graph is cloned in this call. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param childGraph - The graph to clone into this node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphChildGraphNodeGetGraph, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode, + ::cuGraphClone*/ + fn cuGraphAddChildGraphNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + childGraph: cuda_types::CUgraph, + ) -> cuda_types::CUresult; + /** \brief Gets a handle to the embedded graph of a child graph node + + Gets a handle to the embedded graph in a child graph node. This call + does not clone the graph. Changes to the graph will be reflected in + the node, and the node retains ownership of the graph. + + Allocation and free nodes cannot be added to the returned graph. + Attempting to do so will return an error. + + \param hNode - Node to get the embedded graph for + \param phGraph - Location to store a handle to the graph + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddChildGraphNode, + ::cuGraphNodeFindInClone*/ + fn cuGraphChildGraphNodeGetGraph( + hNode: cuda_types::CUgraphNode, + phGraph: *mut cuda_types::CUgraph, + ) -> cuda_types::CUresult; + /** \brief Creates an empty node and adds it to a graph + + Creates a new node which performs no operation, and adds it to \p hGraph with + \p numDependencies dependencies specified via \p dependencies. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + An empty node performs no operation during execution, but can be used for + transitive ordering. 
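// Illustrative sketch (not part of the generated bindings): embedding one graph in
// another as a child graph node and reading the embedded copy back. Both graphs are
// assumed to come from cuGraphCreate; the child is cloned at insertion time, so
// later edits to `child` do not affect the node.
unsafe fn embed_child_graph(
    parent: cuda_types::CUgraph,
    child: cuda_types::CUgraph,
) -> cuda_types::CUgraphNode {
    let mut node: cuda_types::CUgraphNode = std::mem::zeroed();
    let _ = cuGraphAddChildGraphNode(&mut node, parent, std::ptr::null(), 0, child);
    // The cloned, embedded graph can be retrieved again if needed:
    let mut embedded: cuda_types::CUgraph = std::mem::zeroed();
    let _ = cuGraphChildGraphNodeGetGraph(node, &mut embedded);
    node
}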
For example, a phased execution graph with 2 groups of n + nodes with a barrier between them can be represented using an empty node and + 2*n dependency edges, rather than no empty node and n^2 dependency edges. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddKernelNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddEmptyNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + ) -> cuda_types::CUresult; + /** \brief Creates an event record node and adds it to a graph + + Creates a new event record node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and event specified in \p event. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + Each launch of the graph will record \p event to capture execution of the + node's dependencies. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param event - Event for the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphAddEventWaitNode, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddEventRecordNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Returns the event associated with an event record node + + Returns the event of event record node \p hNode in \p event_out. + + \param hNode - Node to get the event for + \param event_out - Pointer to return the event + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddEventRecordNode, + ::cuGraphEventRecordNodeSetEvent, + ::cuGraphEventWaitNodeGetEvent, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent*/ + fn cuGraphEventRecordNodeGetEvent( + hNode: cuda_types::CUgraphNode, + event_out: *mut cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Sets an event record node's event + + Sets the event of event record node \p hNode to \p event. 
+ + \param hNode - Node to set the event for + \param event - Event to use + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuGraphAddEventRecordNode, + ::cuGraphEventRecordNodeGetEvent, + ::cuGraphEventWaitNodeSetEvent, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent*/ + fn cuGraphEventRecordNodeSetEvent( + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Creates an event wait node and adds it to a graph + + Creates a new event wait node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and event specified in \p event. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + The graph node will wait for all work captured in \p event. See ::cuEventRecord() + for details on what is captured by an event. \p event may be from a different context + or device than the launch stream. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param event - Event for the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphAddEventRecordNode, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddEventWaitNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Returns the event associated with an event wait node + + Returns the event of event wait node \p hNode in \p event_out. + + \param hNode - Node to get the event for + \param event_out - Pointer to return the event + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddEventWaitNode, + ::cuGraphEventWaitNodeSetEvent, + ::cuGraphEventRecordNodeGetEvent, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent*/ + fn cuGraphEventWaitNodeGetEvent( + hNode: cuda_types::CUgraphNode, + event_out: *mut cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Sets an event wait node's event + + Sets the event of event wait node \p hNode to \p event. 
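// Illustrative sketch (not part of the generated bindings): recording an event in
// one graph and waiting on it in another, a common way to order work across
// separately launched graphs. `event` is assumed to come from cuEventCreate;
// error handling is omitted.
unsafe fn link_graphs_with_event(
    producer: cuda_types::CUgraph,
    consumer: cuda_types::CUgraph,
    event: cuda_types::CUevent,
) {
    let mut record_node: cuda_types::CUgraphNode = std::mem::zeroed();
    let _ = cuGraphAddEventRecordNode(&mut record_node, producer, std::ptr::null(), 0, event);
    let mut wait_node: cuda_types::CUgraphNode = std::mem::zeroed();
    let _ = cuGraphAddEventWaitNode(&mut wait_node, consumer, std::ptr::null(), 0, event);
}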
+ + \param hNode - Node to set the event for + \param event - Event to use + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuGraphAddEventWaitNode, + ::cuGraphEventWaitNodeGetEvent, + ::cuGraphEventRecordNodeSetEvent, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent*/ + fn cuGraphEventWaitNodeSetEvent( + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Creates an external semaphore signal node and adds it to a graph + + Creates a new external semaphore signal node and adds it to \p hGraph with \p + numDependencies dependencies specified via \p dependencies and arguments specified + in \p nodeParams. It is possible for \p numDependencies to be 0, in which case the + node will be placed at the root of the graph. \p dependencies may not have any + duplicate entries. A handle to the new node will be returned in \p phGraphNode. + + Performs a signal operation on a set of externally allocated semaphore objects + when the node is launched. The operation(s) will occur after all of the node's + dependencies have completed. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Parameters for the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphExternalSemaphoresSignalNodeGetParams, + ::cuGraphExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuImportExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddEventRecordNode, + ::cuGraphAddEventWaitNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddExternalSemaphoresSignalNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Returns an external semaphore signal node's parameters + + Returns the parameters of an external semaphore signal node \p hNode in \p params_out. + The \p extSemArray and \p paramsArray returned in \p params_out, + are owned by the node. This memory remains valid until the node is destroyed or its + parameters are modified, and should not be modified + directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the + parameters of this node. 
+ + \param hNode - Node to get the parameters for + \param params_out - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuLaunchKernel, + ::cuGraphAddExternalSemaphoresSignalNode, + ::cuGraphExternalSemaphoresSignalNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuGraphExternalSemaphoresSignalNodeGetParams( + hNode: cuda_types::CUgraphNode, + params_out: *mut cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets an external semaphore signal node's parameters + + Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams. + + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuGraphAddExternalSemaphoresSignalNode, + ::cuGraphExternalSemaphoresSignalNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuGraphExternalSemaphoresSignalNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates an external semaphore wait node and adds it to a graph + + Creates a new external semaphore wait node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and arguments specified in \p nodeParams. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. A handle + to the new node will be returned in \p phGraphNode. + + Performs a wait operation on a set of externally allocated semaphore objects + when the node is launched. The node's dependencies will not be launched until + the wait operation has completed. 
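// Illustrative sketch (not part of the generated bindings): adding an external
// semaphore signal node as a graph root. `params` points to a caller-filled
// CUDA_EXT_SEM_SIGNAL_NODE_PARAMS whose semaphore handles came from
// cuImportExternalSemaphore; error handling is omitted.
unsafe fn add_signal_node(
    graph: cuda_types::CUgraph,
    params: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS,
) -> cuda_types::CUgraphNode {
    let mut node: cuda_types::CUgraphNode = std::mem::zeroed();
    let _ = cuGraphAddExternalSemaphoresSignalNode(&mut node, graph, std::ptr::null(), 0, params);
    node
}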
+ + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Parameters for the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphExternalSemaphoresWaitNodeGetParams, + ::cuGraphExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphAddExternalSemaphoresSignalNode, + ::cuImportExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddEventRecordNode, + ::cuGraphAddEventWaitNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddExternalSemaphoresWaitNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Returns an external semaphore wait node's parameters + + Returns the parameters of an external semaphore wait node \p hNode in \p params_out. + The \p extSemArray and \p paramsArray returned in \p params_out, + are owned by the node. This memory remains valid until the node is destroyed or its + parameters are modified, and should not be modified + directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the + parameters of this node. + + \param hNode - Node to get the parameters for + \param params_out - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuLaunchKernel, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuGraphExternalSemaphoresWaitNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuGraphExternalSemaphoresWaitNodeGetParams( + hNode: cuda_types::CUgraphNode, + params_out: *mut cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets an external semaphore wait node's parameters + + Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams. + + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuGraphExternalSemaphoresWaitNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync*/ + fn cuGraphExternalSemaphoresWaitNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates a batch memory operation node and adds it to a graph + + Creates a new batch memory operation node and adds it to \p hGraph with \p + numDependencies dependencies specified via \p dependencies and arguments specified in \p nodeParams. 
+ It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. + A handle to the new node will be returned in \p phGraphNode. + + When the node is added, the paramArray inside \p nodeParams is copied and therefore it can be + freed after the call returns. + + \note + Warning: + Improper use of this API may deadlock the application. Synchronization + ordering established through this API is not visible to CUDA. CUDA tasks + that are (even indirectly) ordered by this API should also have that order + expressed with CUDA-visible dependencies such as events. This ensures that + the scheduler does not serialize them in an improper order. For more + information, see the Stream Memory Operations section in the programming + guide(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html). + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Parameters for the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuStreamBatchMemOp, + ::cuStreamWaitValue32, + ::cuStreamWriteValue32, + ::cuStreamWaitValue64, + ::cuStreamWriteValue64, + ::cuGraphBatchMemOpNodeGetParams, + ::cuGraphBatchMemOpNodeSetParams, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddBatchMemOpNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Returns a batch mem op node's parameters + + Returns the parameters of batch mem op node \p hNode in \p nodeParams_out. + The \p paramArray returned in \p nodeParams_out is owned by the node. + This memory remains valid until the node is destroyed or its + parameters are modified, and should not be modified + directly. Use ::cuGraphBatchMemOpNodeSetParams to update the + parameters of this node. + + \param hNode - Node to get the parameters for + \param nodeParams_out - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuStreamBatchMemOp, + ::cuGraphAddBatchMemOpNode, + ::cuGraphBatchMemOpNodeSetParams*/ + fn cuGraphBatchMemOpNodeGetParams( + hNode: cuda_types::CUgraphNode, + nodeParams_out: *mut cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets a batch mem op node's parameters + + Sets the parameters of batch mem op node \p hNode to \p nodeParams. + + The paramArray inside \p nodeParams is copied and therefore it can be + freed after the call returns. 
+ + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetParams, + ::cuStreamBatchMemOp, + ::cuGraphAddBatchMemOpNode, + ::cuGraphBatchMemOpNodeGetParams*/ + fn cuGraphBatchMemOpNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for a batch mem op node in the given graphExec + + Sets the parameters of a batch mem op node in an executable graph \p hGraphExec. + The node is identified by the corresponding node \p hNode in the + non-executable graph, from which the executable graph was instantiated. + + The following fields on operations may be modified on an executable graph: + + op.waitValue.address + op.waitValue.value[64] + op.waitValue.flags bits corresponding to wait type (i.e. CU_STREAM_WAIT_VALUE_FLUSH bit cannot be modified) + op.writeValue.address + op.writeValue.value[64] + + Other fields, such as the context, count or type of operations, and other types of operations such as membars, + may not be modified. + + \p hNode must not have been removed from the original graph. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. + + The paramArray inside \p nodeParams is copied and therefore it can be + freed after the call returns. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Batch mem op node from the graph from which graphExec was instantiated + \param nodeParams - Updated Parameters to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuStreamBatchMemOp, + ::cuGraphAddBatchMemOpNode, + ::cuGraphBatchMemOpNodeGetParams, + ::cuGraphBatchMemOpNodeSetParams, + ::cuGraphInstantiate*/ + fn cuGraphExecBatchMemOpNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates an allocation node and adds it to a graph + + Creates a new allocation node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and arguments specified in \p nodeParams. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. \p dependencies may not have any duplicate entries. A handle + to the new node will be returned in \p phGraphNode. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Parameters for the node + + When ::cuGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in + \p nodeParams.dptr. The allocation's address remains fixed across instantiations and launches. + + If the allocation is freed in the same graph, by creating a free node using ::cuGraphAddMemFreeNode, + the allocation can be accessed by nodes ordered after the allocation node but before the free node. 
+ These allocations cannot be freed outside the owning graph, and they can only be freed once in the + owning graph. + + If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the + graph which are ordered after the allocation node, but also by stream operations ordered after the + graph's execution but before the allocation is freed. + + Allocations which are not freed in the same graph can be freed by: + - passing the allocation to ::cuMemFreeAsync or ::cuMemFree; + - launching a graph with a free node for that allocation; or + - specifying ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH during instantiation, which makes + each launch behave as though it called ::cuMemFreeAsync for every unfreed allocation. + + It is not possible to free an allocation in both the owning graph and another graph. If the allocation + is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed + in another graph, a free node can no longer be added to the owning graph. + + The following restrictions apply to graphs which contain allocation and/or memory free nodes: + - Nodes and edges of the graph cannot be deleted. + - The graph cannot be used in a child node. + - Only one instantiation of the graph may exist at any point in time. + - The graph cannot be cloned. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphAddMemFreeNode, + ::cuGraphMemAllocNodeGetParams, + ::cuDeviceGraphMemTrim, + ::cuDeviceGetGraphMemAttribute, + ::cuDeviceSetGraphMemAttribute, + ::cuMemAllocAsync, + ::cuMemFreeAsync, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddEventRecordNode, + ::cuGraphAddEventWaitNode, + ::cuGraphAddExternalSemaphoresSignalNode, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddMemAllocNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Returns a memory alloc node's parameters + + Returns the parameters of a memory alloc node \p hNode in \p params_out. + The \p poolProps and \p accessDescs returned in \p params_out, are owned by the + node. This memory remains valid until the node is destroyed. The returned + parameters must not be modified. + + \param hNode - Node to get the parameters for + \param params_out - Pointer to return the parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddMemAllocNode, + ::cuGraphMemFreeNodeGetParams*/ + fn cuGraphMemAllocNodeGetParams( + hNode: cuda_types::CUgraphNode, + params_out: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Creates a memory free node and adds it to a graph + + Creates a new memory free node and adds it to \p hGraph with \p numDependencies + dependencies specified via \p dependencies and arguments specified in \p nodeParams. + It is possible for \p numDependencies to be 0, in which case the node will be placed + at the root of the graph. 
\p dependencies may not have any duplicate entries. A handle + to the new node will be returned in \p phGraphNode. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param dptr - Address of memory to free + + ::cuGraphAddMemFreeNode will return ::CUDA_ERROR_INVALID_VALUE if the user attempts to free: + - an allocation twice in the same graph. + - an address that was not returned by an allocation node. + - an invalid address. + + The following restrictions apply to graphs which contain allocation and/or memory free nodes: + - Nodes and edges of the graph cannot be deleted. + - The graph cannot be used in a child node. + - Only one instantiation of the graph may exist at any point in time. + - The graph cannot be cloned. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphAddMemAllocNode, + ::cuGraphMemFreeNodeGetParams, + ::cuDeviceGraphMemTrim, + ::cuDeviceGetGraphMemAttribute, + ::cuDeviceSetGraphMemAttribute, + ::cuMemAllocAsync, + ::cuMemFreeAsync, + ::cuGraphCreate, + ::cuGraphDestroyNode, + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddEventRecordNode, + ::cuGraphAddEventWaitNode, + ::cuGraphAddExternalSemaphoresSignalNode, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuGraphAddKernelNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphAddMemFreeNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + dptr: cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Returns a memory free node's parameters + + Returns the address of a memory free node \p hNode in \p dptr_out. + + \param hNode - Node to get the parameters for + \param dptr_out - Pointer to return the device address + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddMemFreeNode, + ::cuGraphMemAllocNodeGetParams*/ + fn cuGraphMemFreeNodeGetParams( + hNode: cuda_types::CUgraphNode, + dptr_out: *mut cuda_types::CUdeviceptr, + ) -> cuda_types::CUresult; + /** \brief Free unused memory that was cached on the specified device for use with graphs back to the OS. + + Blocks which are not in use by a graph that is either currently executing or scheduled to execute are + freed back to the operating system. + + \param device - The device for which cached memory should be freed. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_DEVICE + + \sa + ::cuGraphAddMemAllocNode, + ::cuGraphAddMemFreeNode, + ::cuDeviceSetGraphMemAttribute, + ::cuDeviceGetGraphMemAttribute*/ + fn cuDeviceGraphMemTrim(device: cuda_types::CUdevice) -> cuda_types::CUresult; + /** \brief Query asynchronous allocation attributes related to graphs + + Valid attributes are: + + - ::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: Amount of memory, in bytes, currently associated with graphs + - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the + last time it was reset. High watermark can only be reset to zero. 
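// Illustrative sketch (not part of the generated bindings): freeing a graph-owned
// allocation inside the same graph. `alloc_node` and `dptr` are assumed to be the
// node and address produced by cuGraphAddMemAllocNode; ordering the free node after
// the allocation keeps the memory usable in between.
unsafe fn free_in_same_graph(
    graph: cuda_types::CUgraph,
    alloc_node: cuda_types::CUgraphNode,
    dptr: cuda_types::CUdeviceptr,
) -> cuda_types::CUgraphNode {
    let deps = [alloc_node];
    let mut free_node: cuda_types::CUgraphNode = std::mem::zeroed();
    let _ = cuGraphAddMemFreeNode(&mut free_node, graph, deps.as_ptr(), 1, dptr);
    // Cached but unused graph memory can later be returned to the OS with
    // cuDeviceGraphMemTrim(device).
    free_node
}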
+ - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: Amount of memory, in bytes, currently allocated for use by + the CUDA graphs asynchronous allocator. + - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by + the CUDA graphs asynchronous allocator. + + \param device - Specifies the scope of the query + \param attr - attribute to get + \param value - retrieved value + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_DEVICE + + \sa + ::cuDeviceSetGraphMemAttribute, + ::cuGraphAddMemAllocNode, + ::cuGraphAddMemFreeNode*/ + fn cuDeviceGetGraphMemAttribute( + device: cuda_types::CUdevice, + attr: cuda_types::CUgraphMem_attribute, + value: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Set asynchronous allocation attributes related to graphs + + Valid attributes are: + + - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the + last time it was reset. High watermark can only be reset to zero. + - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by + the CUDA graphs asynchronous allocator. + + \param device - Specifies the scope of the query + \param attr - attribute to get + \param value - pointer to value to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_DEVICE + + \sa + ::cuDeviceGetGraphMemAttribute, + ::cuGraphAddMemAllocNode, + ::cuGraphAddMemFreeNode*/ + fn cuDeviceSetGraphMemAttribute( + device: cuda_types::CUdevice, + attr: cuda_types::CUgraphMem_attribute, + value: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Clones a graph + + This function creates a copy of \p originalGraph and returns it in \p phGraphClone. + All parameters are copied into the cloned graph. The original graph may be modified + after this call without affecting the clone. + + Child graph nodes in the original graph are recursively copied into the clone. + + \param phGraphClone - Returns newly created cloned graph + \param originalGraph - Graph to clone + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate, + ::cuGraphNodeFindInClone*/ + fn cuGraphClone( + phGraphClone: *mut cuda_types::CUgraph, + originalGraph: cuda_types::CUgraph, + ) -> cuda_types::CUresult; + /** \brief Finds a cloned version of a node + + This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode + in the original graph. + + \p hClonedGraph must have been cloned from \p hOriginalGraph via ::cuGraphClone. + \p hOriginalNode must have been in \p hOriginalGraph at the time of the call to + ::cuGraphClone, and the corresponding cloned node in \p hClonedGraph must not have + been removed. The cloned node is then returned via \p phClonedNode. + + \param phNode - Returns handle to the cloned node + \param hOriginalNode - Handle to the original node + \param hClonedGraph - Cloned graph to query + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphClone*/ + fn cuGraphNodeFindInClone( + phNode: *mut cuda_types::CUgraphNode, + hOriginalNode: cuda_types::CUgraphNode, + hClonedGraph: cuda_types::CUgraph, + ) -> cuda_types::CUresult; + /** \brief Returns a node's type + + Returns the node type of \p hNode in \p type. 
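// Illustrative sketch (not part of the generated bindings): querying how much memory
// is currently associated with graphs on a device. It assumes the attribute named
// above is exposed as an associated constant on cuda_types::CUgraphMem_attribute and
// that its value is returned as a 64-bit count.
unsafe fn graph_mem_in_use(device: cuda_types::CUdevice) -> u64 {
    let mut used: u64 = 0;
    let _ = cuDeviceGetGraphMemAttribute(
        device,
        cuda_types::CUgraphMem_attribute::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT,
        &mut used as *mut u64 as *mut ::core::ffi::c_void,
    );
    used
}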
+ + \param hNode - Node to query + \param type - Pointer to return the node type + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphChildGraphNodeGetGraph, + ::cuGraphKernelNodeGetParams, + ::cuGraphKernelNodeSetParams, + ::cuGraphHostNodeGetParams, + ::cuGraphHostNodeSetParams, + ::cuGraphMemcpyNodeGetParams, + ::cuGraphMemcpyNodeSetParams, + ::cuGraphMemsetNodeGetParams, + ::cuGraphMemsetNodeSetParams*/ + fn cuGraphNodeGetType( + hNode: cuda_types::CUgraphNode, + type_: *mut cuda_types::CUgraphNodeType, + ) -> cuda_types::CUresult; + /** \brief Returns a graph's nodes + + Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this + function will return the number of nodes in \p numNodes. Otherwise, + \p numNodes entries will be filled in. If \p numNodes is higher than the actual + number of nodes, the remaining entries in \p nodes will be set to NULL, and the + number of nodes actually obtained will be returned in \p numNodes. + + \param hGraph - Graph to query + \param nodes - Pointer to return the nodes + \param numNodes - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate, + ::cuGraphGetRootNodes, + ::cuGraphGetEdges, + ::cuGraphNodeGetType, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphGetNodes( + hGraph: cuda_types::CUgraph, + nodes: *mut cuda_types::CUgraphNode, + numNodes: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a graph's root nodes + + Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this + function will return the number of root nodes in \p numRootNodes. Otherwise, + \p numRootNodes entries will be filled in. If \p numRootNodes is higher than the actual + number of root nodes, the remaining entries in \p rootNodes will be set to NULL, and the + number of nodes actually obtained will be returned in \p numRootNodes. + + \param hGraph - Graph to query + \param rootNodes - Pointer to return the root nodes + \param numRootNodes - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate, + ::cuGraphGetNodes, + ::cuGraphGetEdges, + ::cuGraphNodeGetType, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphGetRootNodes( + hGraph: cuda_types::CUgraph, + rootNodes: *mut cuda_types::CUgraphNode, + numRootNodes: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a graph's dependency edges + + Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding + indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the + node in \p from[i]. \p from and \p to may both be NULL, in which + case this function only returns the number of edges in \p numEdges. Otherwise, + \p numEdges entries will be filled in. If \p numEdges is higher than the actual + number of edges, the remaining entries in \p from and \p to will be set to NULL, and + the number of edges actually returned will be written to \p numEdges. 
+ + \param hGraph - Graph to get the edges from + \param from - Location to return edge endpoints + \param to - Location to return edge endpoints + \param numEdges - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphAddDependencies, + ::cuGraphRemoveDependencies, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphGetEdges( + hGraph: cuda_types::CUgraph, + from: *mut cuda_types::CUgraphNode, + to: *mut cuda_types::CUgraphNode, + numEdges: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a graph's dependency edges (12.3+) + + Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding + indices in \p from, \p to and \p edgeData; that is, the node in \p to[i] has a + dependency on the node in \p from[i] with data \p edgeData[i]. \p from and \p to may + both be NULL, in which case this function only returns the number of edges in + \p numEdges. Otherwise, \p numEdges entries will be filled in. If \p numEdges is higher + than the actual number of edges, the remaining entries in \p from and \p to will be + set to NULL, and the number of edges actually returned will be written to \p numEdges. + \p edgeData may alone be NULL, in which case the edges must all have default (zeroed) + edge data. Attempting a lossy query via NULL \p edgeData will result in + ::CUDA_ERROR_LOSSY_QUERY. If \p edgeData is non-NULL then \p from and \p to must be + as well. + + \param hGraph - Graph to get the edges from + \param from - Location to return edge endpoints + \param to - Location to return edge endpoints + \param edgeData - Optional location to return edge data + \param numEdges - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_LOSSY_QUERY, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphAddDependencies, + ::cuGraphRemoveDependencies, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphGetEdges_v2( + hGraph: cuda_types::CUgraph, + from: *mut cuda_types::CUgraphNode, + to: *mut cuda_types::CUgraphNode, + edgeData: *mut cuda_types::CUgraphEdgeData, + numEdges: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a node's dependencies + + Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this + function will return the number of dependencies in \p numDependencies. Otherwise, + \p numDependencies entries will be filled in. If \p numDependencies is higher than the actual + number of dependencies, the remaining entries in \p dependencies will be set to NULL, and the + number of nodes actually obtained will be returned in \p numDependencies. 
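// Illustrative sketch (not part of the generated bindings): the two-call pattern for
// cuGraphGetNodes described above: first query the count with a NULL buffer, then
// fill a buffer of exactly that size. Error handling is omitted.
unsafe fn graph_nodes(graph: cuda_types::CUgraph) -> Vec<cuda_types::CUgraphNode> {
    let mut count: usize = 0;
    let _ = cuGraphGetNodes(graph, std::ptr::null_mut(), &mut count);
    let mut nodes: Vec<cuda_types::CUgraphNode> = Vec::with_capacity(count);
    let _ = cuGraphGetNodes(graph, nodes.as_mut_ptr(), &mut count);
    nodes.set_len(count);
    nodes
}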
+ + \param hNode - Node to query + \param dependencies - Pointer to return the dependencies + \param numDependencies - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeGetDependentNodes, + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphGetEdges, + ::cuGraphAddDependencies, + ::cuGraphRemoveDependencies*/ + fn cuGraphNodeGetDependencies( + hNode: cuda_types::CUgraphNode, + dependencies: *mut cuda_types::CUgraphNode, + numDependencies: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a node's dependencies (12.3+) + + Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this + function will return the number of dependencies in \p numDependencies. Otherwise, + \p numDependencies entries will be filled in. If \p numDependencies is higher than the actual + number of dependencies, the remaining entries in \p dependencies will be set to NULL, and the + number of nodes actually obtained will be returned in \p numDependencies. + + Note that if an edge has non-zero (non-default) edge data and \p edgeData is NULL, + this API will return ::CUDA_ERROR_LOSSY_QUERY. If \p edgeData is non-NULL, then + \p dependencies must be as well. + + \param hNode - Node to query + \param dependencies - Pointer to return the dependencies + \param edgeData - Optional array to return edge data for each dependency + \param numDependencies - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_LOSSY_QUERY, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeGetDependentNodes, + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphGetEdges, + ::cuGraphAddDependencies, + ::cuGraphRemoveDependencies*/ + fn cuGraphNodeGetDependencies_v2( + hNode: cuda_types::CUgraphNode, + dependencies: *mut cuda_types::CUgraphNode, + edgeData: *mut cuda_types::CUgraphEdgeData, + numDependencies: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a node's dependent nodes + + Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which + case this function will return the number of dependent nodes in \p numDependentNodes. + Otherwise, \p numDependentNodes entries will be filled in. If \p numDependentNodes is + higher than the actual number of dependent nodes, the remaining entries in + \p dependentNodes will be set to NULL, and the number of nodes actually obtained will + be returned in \p numDependentNodes. + + \param hNode - Node to query + \param dependentNodes - Pointer to return the dependent nodes + \param numDependentNodes - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeGetDependencies, + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphGetEdges, + ::cuGraphAddDependencies, + ::cuGraphRemoveDependencies*/ + fn cuGraphNodeGetDependentNodes( + hNode: cuda_types::CUgraphNode, + dependentNodes: *mut cuda_types::CUgraphNode, + numDependentNodes: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Returns a node's dependent nodes (12.3+) + + Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which + case this function will return the number of dependent nodes in \p numDependentNodes. 
+ Otherwise, \p numDependentNodes entries will be filled in. If \p numDependentNodes is + higher than the actual number of dependent nodes, the remaining entries in + \p dependentNodes will be set to NULL, and the number of nodes actually obtained will + be returned in \p numDependentNodes. + + Note that if an edge has non-zero (non-default) edge data and \p edgeData is NULL, + this API will return ::CUDA_ERROR_LOSSY_QUERY. If \p edgeData is non-NULL, then + \p dependentNodes must be as well. + + \param hNode - Node to query + \param dependentNodes - Pointer to return the dependent nodes + \param edgeData - Optional pointer to return edge data for dependent nodes + \param numDependentNodes - See description + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_LOSSY_QUERY, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeGetDependencies, + ::cuGraphGetNodes, + ::cuGraphGetRootNodes, + ::cuGraphGetEdges, + ::cuGraphAddDependencies, + ::cuGraphRemoveDependencies*/ + fn cuGraphNodeGetDependentNodes_v2( + hNode: cuda_types::CUgraphNode, + dependentNodes: *mut cuda_types::CUgraphNode, + edgeData: *mut cuda_types::CUgraphEdgeData, + numDependentNodes: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Adds dependency edges to a graph + + The number of dependencies to be added is defined by \p numDependencies + Elements in \p from and \p to at corresponding indices define a dependency. + Each node in \p from and \p to must belong to \p hGraph. + + If \p numDependencies is 0, elements in \p from and \p to will be ignored. + Specifying an existing dependency will return an error. + + \param hGraph - Graph to which dependencies are added + \param from - Array of nodes that provide the dependencies + \param to - Array of dependent nodes + \param numDependencies - Number of dependencies to be added + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphRemoveDependencies, + ::cuGraphGetEdges, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphAddDependencies( + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + numDependencies: usize, + ) -> cuda_types::CUresult; + /** \brief Adds dependency edges to a graph (12.3+) + + The number of dependencies to be added is defined by \p numDependencies + Elements in \p from and \p to at corresponding indices define a dependency. + Each node in \p from and \p to must belong to \p hGraph. + + If \p numDependencies is 0, elements in \p from and \p to will be ignored. + Specifying an existing dependency will return an error. + + \param hGraph - Graph to which dependencies are added + \param from - Array of nodes that provide the dependencies + \param to - Array of dependent nodes + \param edgeData - Optional array of edge data. If NULL, default (zeroed) edge data is assumed. 
+ \param numDependencies - Number of dependencies to be added + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphRemoveDependencies, + ::cuGraphGetEdges, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphAddDependencies_v2( + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + edgeData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + ) -> cuda_types::CUresult; + /** \brief Removes dependency edges from a graph + + The number of \p dependencies to be removed is defined by \p numDependencies. + Elements in \p from and \p to at corresponding indices define a dependency. + Each node in \p from and \p to must belong to \p hGraph. + + If \p numDependencies is 0, elements in \p from and \p to will be ignored. + Specifying a non-existing dependency will return an error. + + Dependencies cannot be removed from graphs which contain allocation or free nodes. + Any attempt to do so will return an error. + + \param hGraph - Graph from which to remove dependencies + \param from - Array of nodes that provide the dependencies + \param to - Array of dependent nodes + \param numDependencies - Number of dependencies to be removed + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddDependencies, + ::cuGraphGetEdges, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphRemoveDependencies( + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + numDependencies: usize, + ) -> cuda_types::CUresult; + /** \brief Removes dependency edges from a graph (12.3+) + + The number of \p dependencies to be removed is defined by \p numDependencies. + Elements in \p from and \p to at corresponding indices define a dependency. + Each node in \p from and \p to must belong to \p hGraph. + + If \p numDependencies is 0, elements in \p from and \p to will be ignored. + Specifying an edge that does not exist in the graph, with data matching + \p edgeData, results in an error. \p edgeData is nullable, which is equivalent + to passing default (zeroed) data for each edge. + + Dependencies cannot be removed from graphs which contain allocation or free nodes. + Any attempt to do so will return an error. + + \param hGraph - Graph from which to remove dependencies + \param from - Array of nodes that provide the dependencies + \param to - Array of dependent nodes + \param edgeData - Optional array of edge data. If NULL, edge data is assumed to + be default (zeroed). + \param numDependencies - Number of dependencies to be removed + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddDependencies, + ::cuGraphGetEdges, + ::cuGraphNodeGetDependencies, + ::cuGraphNodeGetDependentNodes*/ + fn cuGraphRemoveDependencies_v2( + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + edgeData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + ) -> cuda_types::CUresult; + /** \brief Remove a node from the graph + + Removes \p hNode from its graph. This operation also severs any dependencies of other nodes + on \p hNode and vice versa. + + Nodes which belong to a graph which contains allocation or free nodes cannot be destroyed. + Any attempt to do so will return an error. 
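+
+ A small illustration (added, not from the original header documentation) of the
+ \p from / \p to pairing used by the dependency APIs above, assuming two existing
+ nodes \p nodeA and \p nodeB in \p hGraph:
+
+ \code
+ CUgraphNode from[1] = { nodeA }; // edge source: nodeA must complete first
+ CUgraphNode to[1]   = { nodeB }; // edge target: nodeB depends on nodeA
+ cuGraphAddDependencies(hGraph, from, to, 1);
+ \endcode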
+ + \param hNode - Node to remove + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddChildGraphNode, + ::cuGraphAddEmptyNode, + ::cuGraphAddKernelNode, + ::cuGraphAddHostNode, + ::cuGraphAddMemcpyNode, + ::cuGraphAddMemsetNode*/ + fn cuGraphDestroyNode(hNode: cuda_types::CUgraphNode) -> cuda_types::CUresult; + /** \brief Creates an executable graph from a graph + + Instantiates \p hGraph as an executable graph. The graph is validated for any + structural constraints or intra-node constraints which were not previously + validated. If instantiation is successful, a handle to the instantiated graph + is returned in \p phGraphExec. + + The \p flags parameter controls the behavior of instantiation and subsequent + graph launches. Valid flags are: + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a + graph containing memory allocation nodes to automatically free any + unfreed memory allocations before the graph is relaunched. + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch + from the device. If this flag is passed, the executable graph handle returned can be + used to launch the graph from both the host and device. This flag can only be used + on platforms which support unified addressing. This flag cannot be used in + conjunction with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph + to use the priorities from the per-node attributes rather than the priority + of the launch stream during execution. Note that priorities are only available + on kernel nodes, and are copied from stream priority during stream capture. + + If \p hGraph contains any allocation or free nodes, there can be at most one + executable graph in existence for that graph at a time. An attempt to instantiate + a second executable graph before destroying the first with ::cuGraphExecDestroy + will result in an error. + The same also applies if \p hGraph contains any device-updatable kernel nodes. + + If \p hGraph contains kernels which call device-side cudaGraphLaunch() from multiple + contexts, this will result in an error. + + Graphs instantiated for launch on the device have additional restrictions which do not + apply to host graphs: + + - The graph's nodes must reside on a single context. + - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + - The graph cannot be empty and must contain at least one kernel, memcpy, or memset node. + Operation-specific restrictions are outlined below. + - Kernel nodes: + - Use of CUDA Dynamic Parallelism is not permitted. + - Cooperative launches are permitted as long as MPS is not in use. + - Memcpy nodes: + - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + - Copies involving CUDA arrays are not permitted. + - Both operands must be accessible from the current context, and the current context must + match the context of other nodes in the graph. + + \param phGraphExec - Returns instantiated graph + \param hGraph - Graph to instantiate + \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. 
+ + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphInstantiate, + ::cuGraphCreate, + ::cuGraphUpload, + ::cuGraphLaunch, + ::cuGraphExecDestroy*/ + fn cuGraphInstantiateWithFlags( + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + flags: ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + /** \brief Creates an executable graph from a graph + + Instantiates \p hGraph as an executable graph according to the \p instantiateParams structure. + The graph is validated for any structural constraints or intra-node constraints + which were not previously validated. If instantiation is successful, a handle to + the instantiated graph is returned in \p phGraphExec. + + \p instantiateParams controls the behavior of instantiation and subsequent + graph launches, as well as returning more detailed information in the event of an error. + ::CUDA_GRAPH_INSTANTIATE_PARAMS is defined as: + + \code +typedef struct { +cuuint64_t flags; +CUstream hUploadStream; +CUgraphNode hErrNode_out; +CUgraphInstantiateResult result_out; +} CUDA_GRAPH_INSTANTIATE_PARAMS; + \endcode + + The \p flags field controls the behavior of instantiation and subsequent + graph launches. Valid flags are: + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a + graph containing memory allocation nodes to automatically free any + unfreed memory allocations before the graph is relaunched. + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD, which will perform an upload of the graph + into \p hUploadStream once the graph has been instantiated. + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch + from the device. If this flag is passed, the executable graph handle returned can be + used to launch the graph from both the host and device. This flag can only be used + on platforms which support unified addressing. This flag cannot be used in + conjunction with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + + - ::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph + to use the priorities from the per-node attributes rather than the priority + of the launch stream during execution. Note that priorities are only available + on kernel nodes, and are copied from stream priority during stream capture. + + If \p hGraph contains any allocation or free nodes, there can be at most one + executable graph in existence for that graph at a time. An attempt to instantiate a + second executable graph before destroying the first with ::cuGraphExecDestroy will + result in an error. + The same also applies if \p hGraph contains any device-updatable kernel nodes. + + If \p hGraph contains kernels which call device-side cudaGraphLaunch() from multiple + contexts, this will result in an error. + + Graphs instantiated for launch on the device have additional restrictions which do not + apply to host graphs: + + - The graph's nodes must reside on a single context. + - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + - The graph cannot be empty and must contain at least one kernel, memcpy, or memset node. + Operation-specific restrictions are outlined below. + - Kernel nodes: + - Use of CUDA Dynamic Parallelism is not permitted. + - Cooperative launches are permitted as long as MPS is not in use. 
+ - Memcpy nodes: + - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + - Copies involving CUDA arrays are not permitted. + - Both operands must be accessible from the current context, and the current context must + match the context of other nodes in the graph. + + In the event of an error, the \p result_out and \p hErrNode_out fields will contain more + information about the nature of the error. Possible error reporting includes: + + - ::CUDA_GRAPH_INSTANTIATE_ERROR, if passed an invalid value or if an unexpected error occurred + which is described by the return value of the function. \p hErrNode_out will be set to NULL. + - ::CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE, if the graph structure is invalid. \p hErrNode_out + will be set to one of the offending nodes. + - ::CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED, if the graph is instantiated for device + launch but contains a node of an unsupported node type, or a node which performs unsupported + operations, such as use of CUDA dynamic parallelism within a kernel node. \p hErrNode_out will + be set to this node. + - ::CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED, if the graph is instantiated for device + launch but a node’s context differs from that of another node. This error can also be returned + if a graph is not instantiated for device launch and it contains kernels which call device-side + cudaGraphLaunch() from multiple contexts. \p hErrNode_out will be set to this node. + + If instantiation is successful, \p result_out will be set to ::CUDA_GRAPH_INSTANTIATE_SUCCESS, + and \p hErrNode_out will be set to NULL. + + \param phGraphExec - Returns instantiated graph + \param hGraph - Graph to instantiate + \param instantiateParams - Instantiation parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate, + ::cuGraphInstantiate, + ::cuGraphExecDestroy*/ + fn cuGraphInstantiateWithParams_ptsz( + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Query the instantiation flags of an executable graph + + Returns the flags that were passed to instantiation for the given executable graph. + ::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD will not be returned by this API as it does + not affect the resulting executable graph. + + \param hGraphExec - The executable graph to query + \param flags - Returns the instantiation flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphInstantiate, + ::cuGraphInstantiateWithParams*/ + fn cuGraphExecGetFlags( + hGraphExec: cuda_types::CUgraphExec, + flags: *mut cuda_types::cuuint64_t, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for a kernel node in the given graphExec + + Sets the parameters of a kernel node in an executable graph \p hGraphExec. + The node is identified by the corresponding node \p hNode in the + non-executable graph, from which the executable graph was instantiated. + + \p hNode must not have been removed from the original graph. All \p nodeParams + fields may change, but the following restrictions apply to \p func updates: + + - The owning context of the function cannot change. 
+ - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + to a function which uses CDP + - A node whose function originally did not make device-side update calls cannot be updated + to a function which makes device-side update calls. + - If \p hGraphExec was not instantiated for device launch, a node whose function originally + did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + device-side cudaGraphLaunch() unless the node resides on the same context as nodes which + contained such calls at instantiate-time. If no such calls were present at instantiation, + these updates cannot be performed at all. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. + + If \p hNode is a device-updatable kernel node, the next upload/launch of \p hGraphExec + will overwrite any previous device-side updates. Additionally, applying host updates to a + device-updatable kernel node while it is being updated from the device will result in + undefined behavior. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - kernel node from the graph from which graphExec was instantiated + \param nodeParams - Updated Parameters to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddKernelNode, + ::cuGraphKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecKernelNodeSetParams_v2( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for a memcpy node in the given graphExec. + + Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + contained \p copyParams at instantiation. hNode must remain in the graph which was + used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + + The source and destination memory in \p copyParams must be allocated from the same + contexts as the original source and destination memory. Both the instantiation-time + memory operands and the memory operands in \p copyParams must be 1-dimensional. + Zero-length operations are not supported. + + The modifications only affect future launches of \p hGraphExec. Already enqueued + or running launches of \p hGraphExec are not affected by this call. hNode is also + not modified by this call. + + Returns CUDA_ERROR_INVALID_VALUE if the memory operands' mappings changed or + either the original or new memory operands are multidimensional. 
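+
+ A hedged sketch (added, not from the original header documentation) of
+ retargeting a 1-D memcpy node in an instantiated graph; \p originalCopyParams,
+ \p newSrc, \p newDst, \p hMemcpyNode and \p ctx are assumed to be kept by the
+ application:
+
+ \code
+ CUDA_MEMCPY3D p = originalCopyParams; // parameters used when the graph was built
+ p.srcDevice = newSrc;                 // only the addresses change;
+ p.dstDevice = newDst;                 // dimensions and contexts must stay the same
+ cuGraphExecMemcpyNodeSetParams(hGraphExec, hMemcpyNode, &p, ctx);
+ \endcode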
+ + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Memcpy node from the graph which was used to instantiate graphExec + \param copyParams - The updated parameters to set + \param ctx - Context on which to run the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddMemcpyNode, + ::cuGraphMemcpyNodeSetParams, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecMemcpyNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + copyParams: *const cuda_types::CUDA_MEMCPY3D, + ctx: cuda_types::CUcontext, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for a memset node in the given graphExec. + + Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + contained \p memsetParams at instantiation. hNode must remain in the graph which was + used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + + The destination memory in \p memsetParams must be allocated from the same + contexts as the original destination memory. Both the instantiation-time + memory operand and the memory operand in \p memsetParams must be 1-dimensional. + Zero-length operations are not supported. + + The modifications only affect future launches of \p hGraphExec. Already enqueued + or running launches of \p hGraphExec are not affected by this call. hNode is also + not modified by this call. + + Returns CUDA_ERROR_INVALID_VALUE if the memory operand's mappings changed or + either the original or new memory operand are multidimensional. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Memset node from the graph which was used to instantiate graphExec + \param memsetParams - The updated parameters to set + \param ctx - Context on which to run the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddMemsetNode, + ::cuGraphMemsetNodeSetParams, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecMemsetNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS, + ctx: cuda_types::CUcontext, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for a host node in the given graphExec. + + Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + contained \p nodeParams at instantiation. hNode must remain in the graph which was + used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + + The modifications only affect future launches of \p hGraphExec. 
Already enqueued + or running launches of \p hGraphExec are not affected by this call. hNode is also + not modified by this call. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Host node from the graph which was used to instantiate graphExec + \param nodeParams - The updated parameters to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddHostNode, + ::cuGraphHostNodeSetParams, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecHostNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Updates node parameters in the child graph node in the given graphExec. + + Updates the work represented by \p hNode in \p hGraphExec as though the nodes contained + in \p hNode's graph had the parameters contained in \p childGraph's nodes at instantiation. + \p hNode must remain in the graph which was used to instantiate \p hGraphExec. + Changed edges to and from \p hNode are ignored. + + The modifications only affect future launches of \p hGraphExec. Already enqueued + or running launches of \p hGraphExec are not affected by this call. \p hNode is also + not modified by this call. + + The topology of \p childGraph, as well as the node insertion order, must match that + of the graph contained in \p hNode. See ::cuGraphExecUpdate() for a list of restrictions + on what can be updated in an instantiated graph. The update is recursive, so child graph + nodes contained within the top level child graph will also be updated. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Host node from the graph which was used to instantiate graphExec + \param childGraph - The graph supplying the updated parameters + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddChildGraphNode, + ::cuGraphChildGraphNodeGetGraph, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecChildGraphNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + childGraph: cuda_types::CUgraph, + ) -> cuda_types::CUresult; + /** \brief Sets the event for an event record node in the given graphExec + + Sets the event of an event record node in an executable graph \p hGraphExec. + The node is identified by the corresponding node \p hNode in the + non-executable graph, from which the executable graph was instantiated. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. 
+ + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - event record node from the graph from which graphExec was instantiated + \param event - Updated event to use + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddEventRecordNode, + ::cuGraphEventRecordNodeGetEvent, + ::cuGraphEventWaitNodeSetEvent, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecEventRecordNodeSetEvent( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Sets the event for an event wait node in the given graphExec + + Sets the event of an event wait node in an executable graph \p hGraphExec. + The node is identified by the corresponding node \p hNode in the + non-executable graph, from which the executable graph was instantiated. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - event wait node from the graph from which graphExec was instantiated + \param event - Updated event to use + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddEventWaitNode, + ::cuGraphEventWaitNodeGetEvent, + ::cuGraphEventRecordNodeSetEvent, + ::cuEventRecordWithFlags, + ::cuStreamWaitEvent, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecEventWaitNodeSetEvent( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for an external semaphore signal node in the given graphExec + + Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec. + The node is identified by the corresponding node \p hNode in the + non-executable graph, from which the executable graph was instantiated. + + \p hNode must not have been removed from the original graph. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. + + Changing \p nodeParams->numExtSems is not supported. 
+ + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - semaphore signal node from the graph from which graphExec was instantiated + \param nodeParams - Updated Parameters to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddExternalSemaphoresSignalNode, + ::cuImportExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecExternalSemaphoresSignalNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Sets the parameters for an external semaphore wait node in the given graphExec + + Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec. + The node is identified by the corresponding node \p hNode in the + non-executable graph, from which the executable graph was instantiated. + + \p hNode must not have been removed from the original graph. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. + + Changing \p nodeParams->numExtSems is not supported. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - semaphore wait node from the graph from which graphExec was instantiated + \param nodeParams - Updated Parameters to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphExecNodeSetParams, + ::cuGraphAddExternalSemaphoresWaitNode, + ::cuImportExternalSemaphore, + ::cuSignalExternalSemaphoresAsync, + ::cuWaitExternalSemaphoresAsync, + ::cuGraphExecKernelNodeSetParams, + ::cuGraphExecMemcpyNodeSetParams, + ::cuGraphExecMemsetNodeSetParams, + ::cuGraphExecHostNodeSetParams, + ::cuGraphExecChildGraphNodeSetParams, + ::cuGraphExecEventRecordNodeSetEvent, + ::cuGraphExecEventWaitNodeSetEvent, + ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecExternalSemaphoresWaitNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, + ) -> cuda_types::CUresult; + /** \brief Enables or disables the specified node in the given graphExec + + Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent + to empty nodes until they are reenabled. Existing node parameters are not affected by + disabling/enabling the node. + + The node is identified by the corresponding node \p hNode in the non-executable + graph, from which the executable graph was instantiated. + + \p hNode must not have been removed from the original graph. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. 
+ + If \p hNode is a device-updatable kernel node, the next upload/launch of \p hGraphExec + will overwrite any previous device-side updates. Additionally, applying host updates to a + device-updatable kernel node while it is being updated from the device will result in + undefined behavior. + + \note Currently only kernel, memset and memcpy nodes are supported. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Node from the graph from which graphExec was instantiated + \param isEnabled - Node is enabled if != 0, otherwise the node is disabled + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeGetEnabled, + ::cuGraphExecUpdate, + ::cuGraphInstantiate + ::cuGraphLaunch*/ + fn cuGraphNodeSetEnabled( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + isEnabled: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Query whether a node in the given graphExec is enabled + + Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled. + + The node is identified by the corresponding node \p hNode in the non-executable + graph, from which the executable graph was instantiated. + + \p hNode must not have been removed from the original graph. + + \note Currently only kernel, memset and memcpy nodes are supported. + \note This function will not reflect device-side updates for device-updatable kernel nodes. + + \param hGraphExec - The executable graph in which to set the specified node + \param hNode - Node from the graph from which graphExec was instantiated + \param isEnabled - Location to return the enabled status of the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphNodeSetEnabled, + ::cuGraphExecUpdate, + ::cuGraphInstantiate + ::cuGraphLaunch*/ + fn cuGraphNodeGetEnabled( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + isEnabled: *mut ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Uploads an executable graph in a stream + + Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of + the same \p hGraphExec will be serialized. Each upload is ordered behind both any + previous work in \p hStream and any previous launches of \p hGraphExec. + Uses memory cached by \p stream to back the allocations owned by \p hGraphExec. + + \param hGraphExec - Executable graph to upload + \param hStream - Stream in which to upload the graph + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphInstantiate, + ::cuGraphLaunch, + ::cuGraphExecDestroy*/ + fn cuGraphUpload_ptsz( + hGraphExec: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Launches an executable graph in a stream + + Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing + at a time. Each launch is ordered behind both any previous work in \p hStream + and any previous launches of \p hGraphExec. To execute a graph concurrently, it must be + instantiated multiple times into multiple executable graphs. + + If any allocations created by \p hGraphExec remain unfreed (from a previous launch) and + \p hGraphExec was not instantiated with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, + the launch will fail with ::CUDA_ERROR_INVALID_VALUE. 
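+
+ An end-to-end sketch (added, not from the original header documentation) of the
+ typical instantiate / upload / launch sequence, assuming an existing \p hGraph
+ and \p hStream and omitting error handling:
+
+ \code
+ CUgraphExec exec;
+ cuGraphInstantiateWithFlags(&exec, hGraph, 0); // validate and instantiate
+ cuGraphUpload(exec, hStream);                  // optional: pre-upload the work
+ cuGraphLaunch(exec, hStream);                  // execute the whole graph
+ cuStreamSynchronize(hStream);
+ cuGraphExecDestroy(exec);
+ \endcode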
+ + \param hGraphExec - Executable graph to launch + \param hStream - Stream in which to launch the graph + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphInstantiate, + ::cuGraphUpload, + ::cuGraphExecDestroy*/ + fn cuGraphLaunch_ptsz( + hGraphExec: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Destroys an executable graph + + Destroys the executable graph specified by \p hGraphExec, as well + as all of its executable nodes. If the executable graph is + in-flight, it will not be terminated, but rather freed + asynchronously on completion. + + \param hGraphExec - Executable graph to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphInstantiate, + ::cuGraphUpload, + ::cuGraphLaunch*/ + fn cuGraphExecDestroy(hGraphExec: cuda_types::CUgraphExec) -> cuda_types::CUresult; + /** \brief Destroys a graph + + Destroys the graph specified by \p hGraph, as well as all of its nodes. + + \param hGraph - Graph to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate*/ + fn cuGraphDestroy(hGraph: cuda_types::CUgraph) -> cuda_types::CUresult; + /** \brief Check whether an executable graph can be updated with a graph and perform the update if possible + + Updates the node parameters in the instantiated graph specified by \p hGraphExec with the + node parameters in a topologically identical graph specified by \p hGraph. + + Limitations: + + - Kernel nodes: + - The owning context of the function cannot change. + - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + to a function which uses CDP. + - A node whose function originally did not make device-side update calls cannot be updated + to a function which makes device-side update calls. + - A cooperative node cannot be updated to a non-cooperative node, and vice-versa. + - If the graph was instantiated with CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, the + priority attribute cannot change. Equality is checked on the originally requested + priority values, before they are clamped to the device's supported range. + - If \p hGraphExec was not instantiated for device launch, a node whose function originally + did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + device-side cudaGraphLaunch() unless the node resides on the same context as nodes which + contained such calls at instantiate-time. If no such calls were present at instantiation, + these updates cannot be performed at all. + - Neither \p hGraph nor \p hGraphExec may contain device-updatable kernel nodes. + - Memset and memcpy nodes: + - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change. + - The source/destination memory must be allocated from the same contexts as the original + source/destination memory. + - Only 1D memsets can be changed. + - Additional memcpy node restrictions: + - Changing either the source or destination memory type(i.e. CU_MEMORYTYPE_DEVICE, + CU_MEMORYTYPE_ARRAY, etc.) is not supported. + - External semaphore wait nodes and record nodes: + - Changing the number of semaphores is not supported. 
+ - Conditional nodes: + - Changing node parameters is not supported. + - Changing parameters of nodes within the conditional body graph is subject to the rules above. + - Conditional handle flags and default values are updated as part of the graph update. + + Note: The API may add further restrictions in future releases. The return code should always be checked. + + cuGraphExecUpdate sets the result member of \p resultInfo to CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED + under the following conditions: + - The count of nodes directly in \p hGraphExec and \p hGraph differ, in which case resultInfo->errorNode + is set to NULL. + - \p hGraph has more exit nodes than \p hGraphExec, in which case resultInfo->errorNode is set to one of + the exit nodes in hGraph. + - A node in \p hGraph has a different number of dependencies than the node from \p hGraphExec it is paired with, + in which case resultInfo->errorNode is set to the node from \p hGraph. + - A node in \p hGraph has a dependency that does not match with the corresponding dependency of the paired node + from \p hGraphExec. resultInfo->errorNode will be set to the node from \p hGraph. resultInfo->errorFromNode + will be set to the mismatched dependency. The dependencies are paired based on edge order and a dependency + does not match when the nodes are already paired based on other edges examined in the graph. + + cuGraphExecUpdate sets the result member of \p resultInfo to: + - CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value. + - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology changed. + - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node changed, in which case + \p hErrorNode_out is set to the node from \p hGraph. + - CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE if the function changed in an unsupported + way (see note above), in which case \p hErrorNode_out is set to the node from \p hGraph. + - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a node changed in a way + that is not supported, in which case \p hErrorNode_out is set to the node from \p hGraph. + - CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED if any attributes of a node changed in a way + that is not supported, in which case \p hErrorNode_out is set to the node from \p hGraph. + - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is unsupported, like + the node's type or configuration, in which case \p hErrorNode_out is set to the node from \p hGraph. + + If the update fails for a reason not listed above, the result member of \p resultInfo will be set + to CU_GRAPH_EXEC_UPDATE_ERROR. If the update succeeds, the result member will be set to CU_GRAPH_EXEC_UPDATE_SUCCESS. + + cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed successfully. It returns + CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the graph update was not performed because it included + changes which violated constraints specific to instantiated graph update. + + \param hGraphExec The instantiated graph to be updated + \param hGraph The graph containing the updated parameters + \param resultInfo The error info structure + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE, + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphInstantiate*/ + fn cuGraphExecUpdate_v2( + hGraphExec: cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + resultInfo: *mut cuda_types::CUgraphExecUpdateResultInfo, + ) -> cuda_types::CUresult; + /** \brief Copies attributes from source node to destination node.
+ + Copies attributes from source node \p src to destination node \p dst. + Both node must have the same context. + + \param[out] dst Destination node + \param[in] src Source node + For list of attributes see ::CUkernelNodeAttrID + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuGraphKernelNodeCopyAttributes( + dst: cuda_types::CUgraphNode, + src: cuda_types::CUgraphNode, + ) -> cuda_types::CUresult; + /** \brief Queries node attribute. + + Queries attribute \p attr from node \p hNode and stores it in corresponding + member of \p value_out. + + \param[in] hNode + \param[in] attr + \param[out] value_out + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuGraphKernelNodeGetAttribute( + hNode: cuda_types::CUgraphNode, + attr: cuda_types::CUkernelNodeAttrID, + value_out: *mut cuda_types::CUkernelNodeAttrValue, + ) -> cuda_types::CUresult; + /** \brief Sets node attribute. + + Sets attribute \p attr on node \p hNode from corresponding attribute of + \p value. + + \param[out] hNode + \param[in] attr + \param[out] value + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE + \notefnerr + + \sa + ::CUaccessPolicyWindow*/ + fn cuGraphKernelNodeSetAttribute( + hNode: cuda_types::CUgraphNode, + attr: cuda_types::CUkernelNodeAttrID, + value: *const cuda_types::CUkernelNodeAttrValue, + ) -> cuda_types::CUresult; + /** \brief Write a DOT file describing graph structure + + Using the provided \p hGraph, write to \p path a DOT formatted description of the graph. + By default this includes the graph topology, node types, node id, kernel names and memcpy direction. + \p flags can be specified to write more detailed information about each node type such as + parameter values, kernel attributes, node and function handles. + + \param hGraph - The graph to create a DOT file from + \param path - The path to write the DOT file to + \param flags - Flags from CUgraphDebugDot_flags for specifying which additional node information to write + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OPERATING_SYSTEM*/ + fn cuGraphDebugDotPrint( + hGraph: cuda_types::CUgraph, + path: *const ::core::ffi::c_char, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Create a user object + + Create a user object with the specified destructor callback and initial reference count. The + initial references are owned by the caller. + + Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they + are executed by a shared internal thread. Another thread may be signaled to perform such + actions, if it does not block forward progress of tasks scheduled through CUDA. + + See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + + \param object_out - Location to return the user object handle + \param ptr - The pointer to pass to the destroy function + \param destroy - Callback to free the user object when it is no longer in use + \param initialRefcount - The initial refcount to create the object with, typically 1. The + initial references are owned by the calling thread. + \param flags - Currently it is required to pass ::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC, + which is the only defined flag. This indicates that the destroy + callback cannot be waited on by any CUDA API. 
Users requiring + synchronization of the callback should signal its completion + manually. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuUserObjectRetain, + ::cuUserObjectRelease, + ::cuGraphRetainUserObject, + ::cuGraphReleaseUserObject, + ::cuGraphCreate*/ + fn cuUserObjectCreate( + object_out: *mut cuda_types::CUuserObject, + ptr: *mut ::core::ffi::c_void, + destroy: cuda_types::CUhostFn, + initialRefcount: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Retain a reference to a user object + + Retains new references to a user object. The new references are owned by the caller. + + See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + + \param object - The object to retain + \param count - The number of references to retain, typically 1. Must be nonzero + and not larger than INT_MAX. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuUserObjectCreate, + ::cuUserObjectRelease, + ::cuGraphRetainUserObject, + ::cuGraphReleaseUserObject, + ::cuGraphCreate*/ + fn cuUserObjectRetain( + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Release a reference to a user object + + Releases user object references owned by the caller. The object's destructor is invoked if + the reference count reaches zero. + + It is undefined behavior to release references not owned by the caller, or to use a user + object handle after all references are released. + + See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + + \param object - The object to release + \param count - The number of references to release, typically 1. Must be nonzero + and not larger than INT_MAX. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuUserObjectCreate, + ::cuUserObjectRetain, + ::cuGraphRetainUserObject, + ::cuGraphReleaseUserObject, + ::cuGraphCreate*/ + fn cuUserObjectRelease( + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Retain a reference to a user object from a graph + + Creates or moves user object references that will be owned by a CUDA graph. + + See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + + \param graph - The graph to associate the reference with + \param object - The user object to retain a reference for + \param count - The number of references to add to the graph, typically 1. Must be + nonzero and not larger than INT_MAX. + \param flags - The optional flag ::CU_GRAPH_USER_OBJECT_MOVE transfers references + from the calling thread, rather than create new references. Pass 0 + to create new references. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuUserObjectCreate, + ::cuUserObjectRetain, + ::cuUserObjectRelease, + ::cuGraphReleaseUserObject, + ::cuGraphCreate*/ + fn cuGraphRetainUserObject( + graph: cuda_types::CUgraph, + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Release a user object reference from a graph + + Releases user object references owned by a graph. + + See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. 
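+
+ A minimal sketch (added, not from the original header documentation) of tying a
+ caller-owned resource to a graph's lifetime; \p myPtr and \p myDestroyCb (a
+ ::CUhostFn) are assumed to be application-provided:
+
+ \code
+ CUuserObject obj;
+ cuUserObjectCreate(&obj, myPtr, myDestroyCb, 1, CU_USER_OBJECT_NO_DESTRUCTOR_SYNC);
+ cuGraphRetainUserObject(hGraph, obj, 1, CU_GRAPH_USER_OBJECT_MOVE); // the graph now owns the reference
+ \endcode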
+ + \param graph - The graph that will release the reference + \param object - The user object to release a reference for + \param count - The number of references to release, typically 1. Must be nonzero + and not larger than INT_MAX. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuUserObjectCreate, + ::cuUserObjectRetain, + ::cuUserObjectRelease, + ::cuGraphRetainUserObject, + ::cuGraphCreate*/ + fn cuGraphReleaseUserObject( + graph: cuda_types::CUgraph, + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Adds a node of arbitrary type to a graph + + Creates a new node in \p hGraph described by \p nodeParams with \p numDependencies + dependencies specified via \p dependencies. \p numDependencies may be 0. + \p dependencies may be null if \p numDependencies is 0. \p dependencies may not have + any duplicate entries. + + \p nodeParams is a tagged union. The node type should be specified in the \p type field, + and type-specific parameters in the corresponding union member. All unused bytes - that + is, \p reserved0 and all bytes past the utilized union member - must be set to zero. + It is recommended to use brace initialization or memset to ensure all bytes are + initialized. + + Note that for some node types, \p nodeParams may contain "out parameters" which are + modified during the call, such as \p nodeParams->alloc.dptr. + + A handle to the new node will be returned in \p phGraphNode. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param numDependencies - Number of dependencies + \param nodeParams - Specification of the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_NOT_SUPPORTED + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate, + ::cuGraphNodeSetParams, + ::cuGraphExecNodeSetParams*/ + fn cuGraphAddNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *mut cuda_types::CUgraphNodeParams, + ) -> cuda_types::CUresult; + /** \brief Adds a node of arbitrary type to a graph (12.3+) + + Creates a new node in \p hGraph described by \p nodeParams with \p numDependencies + dependencies specified via \p dependencies. \p numDependencies may be 0. + \p dependencies may be null if \p numDependencies is 0. \p dependencies may not have + any duplicate entries. + + \p nodeParams is a tagged union. The node type should be specified in the \p type field, + and type-specific parameters in the corresponding union member. All unused bytes - that + is, \p reserved0 and all bytes past the utilized union member - must be set to zero. + It is recommended to use brace initialization or memset to ensure all bytes are + initialized. + + Note that for some node types, \p nodeParams may contain "out parameters" which are + modified during the call, such as \p nodeParams->alloc.dptr. + + A handle to the new node will be returned in \p phGraphNode. + + \param phGraphNode - Returns newly created node + \param hGraph - Graph to which to add the node + \param dependencies - Dependencies of the node + \param dependencyData - Optional edge data for the dependencies. If NULL, the data is + assumed to be default (zeroed) for all dependencies. 
+ \param numDependencies - Number of dependencies + \param nodeParams - Specification of the node + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_NOT_SUPPORTED + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphCreate, + ::cuGraphNodeSetParams, + ::cuGraphExecNodeSetParams*/ + fn cuGraphAddNode_v2( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + nodeParams: *mut cuda_types::CUgraphNodeParams, + ) -> cuda_types::CUresult; + /** \brief Update's a graph node's parameters + + Sets the parameters of graph node \p hNode to \p nodeParams. The node type specified by + \p nodeParams->type must match the type of \p hNode. \p nodeParams must be fully + initialized and all unused bytes (reserved, padding) zeroed. + + Modifying parameters is not supported for node types CU_GRAPH_NODE_TYPE_MEM_ALLOC and + CU_GRAPH_NODE_TYPE_MEM_FREE. + + \param hNode - Node to set the parameters for + \param nodeParams - Parameters to copy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphExecNodeSetParams*/ + fn cuGraphNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUgraphNodeParams, + ) -> cuda_types::CUresult; + /** \brief Update's a graph node's parameters in an instantiated graph + + Sets the parameters of a node in an executable graph \p hGraphExec. The node is identified + by the corresponding node \p hNode in the non-executable graph from which the executable + graph was instantiated. \p hNode must not have been removed from the original graph. + + The modifications only affect future launches of \p hGraphExec. Already + enqueued or running launches of \p hGraphExec are not affected by this call. + \p hNode is also not modified by this call. + + Allowed changes to parameters on executable graphs are as follows: + +
+ Node type                  | Allowed changes
+ kernel                     | See ::cuGraphExecKernelNodeSetParams
+ memcpy                     | Addresses for 1-dimensional copies if allocated in same context; see ::cuGraphExecMemcpyNodeSetParams
+ memset                     | Addresses for 1-dimensional memsets if allocated in same context; see ::cuGraphExecMemsetNodeSetParams
+ host                       | Unrestricted
+ child graph                | Topology must match and restrictions apply recursively; see ::cuGraphExecUpdate
+ event wait                 | Unrestricted
+ event record               | Unrestricted
+ external semaphore signal  | Number of semaphore operations cannot change
+ external semaphore wait    | Number of semaphore operations cannot change
+ memory allocation          | API unsupported
+ memory free                | API unsupported
+ batch memops               | Addresses, values, and operation type for wait operations; see ::cuGraphExecBatchMemOpNodeSetParams
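+
+ For example, a kernel node's launch parameters can be replaced in the
+ instantiated graph only (a sketch, not part of the original header text;
+ hFunc, newArgs and the geometry below are placeholders, error checking omitted):
+
+ \code
+CUgraphNodeParams p;
+memset(&p, 0, sizeof(p));              // all unused bytes must be zero
+p.type = CU_GRAPH_NODE_TYPE_KERNEL;
+p.kernel.func = hFunc;
+p.kernel.gridDimX = 256;  p.kernel.gridDimY = 1;  p.kernel.gridDimZ = 1;
+p.kernel.blockDimX = 128; p.kernel.blockDimY = 1; p.kernel.blockDimZ = 1;
+p.kernel.kernelParams = newArgs;
+cuGraphExecNodeSetParams(hGraphExec, hNode, &p);
+ \endcode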
+ + \param hGraphExec - The executable graph in which to update the specified node + \param hNode - Corresponding node from the graph from which graphExec was instantiated + \param nodeParams - Updated Parameters to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode, + ::cuGraphNodeSetParams + ::cuGraphExecUpdate, + ::cuGraphInstantiate*/ + fn cuGraphExecNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUgraphNodeParams, + ) -> cuda_types::CUresult; + /** \brief Create a conditional handle + + Creates a conditional handle associated with \p hGraph. + + The conditional handle must be associated with a conditional node in this graph or one of its children. + + Handles not associated with a conditional node may cause graph instantiation to fail. + + Handles can only be set from the context with which they are associated. + + \param pHandle_out - Pointer used to return the handle to the caller. + \param hGraph - Graph which will contain the conditional node using this handle. + \param ctx - Context for the handle and associated conditional node. + \param defaultLaunchValue - Optional initial value for the conditional variable. + \param flags - Currently must be CU_GRAPH_COND_ASSIGN_DEFAULT or 0. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_graph_thread_safety + \notefnerr + + \sa + ::cuGraphAddNode*/ + fn cuGraphConditionalHandleCreate( + pHandle_out: *mut cuda_types::CUgraphConditionalHandle, + hGraph: cuda_types::CUgraph, + ctx: cuda_types::CUcontext, + defaultLaunchValue: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Returns occupancy of a function + + Returns in \p *numBlocks the number of the maximum active blocks per + streaming multiprocessor. + + \param numBlocks - Returned occupancy + \param func - Kernel for which occupancy is calculated + \param blockSize - Block size the kernel is intended to be launched with + \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cudaOccupancyMaxActiveBlocksPerMultiprocessor*/ + fn cuOccupancyMaxActiveBlocksPerMultiprocessor( + numBlocks: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSize: ::core::ffi::c_int, + dynamicSMemSize: usize, + ) -> cuda_types::CUresult; + /** \brief Returns occupancy of a function + + Returns in \p *numBlocks the number of the maximum active blocks per + streaming multiprocessor. + + The \p Flags parameter controls how special cases are handled. The + valid flags are: + + - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as + ::cuOccupancyMaxActiveBlocksPerMultiprocessor; + + - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the + default behavior on platform where global caching affects + occupancy. On such platforms, if caching is enabled, but + per-block SM resource usage would result in zero occupancy, the + occupancy calculator will calculate the occupancy as if caching + is disabled. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE makes + the occupancy calculator to return 0 in such cases. 
More information + can be found about this feature in the "Unified L1/Texture Cache" + section of the Maxwell tuning guide. + + \param numBlocks - Returned occupancy + \param func - Kernel for which occupancy is calculated + \param blockSize - Block size the kernel is intended to be launched with + \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + \param flags - Requested behavior for the occupancy calculator + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags*/ + fn cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + numBlocks: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSize: ::core::ffi::c_int, + dynamicSMemSize: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Suggest a launch configuration with reasonable occupancy + + Returns in \p *blockSize a reasonable block size that can achieve + the maximum occupancy (or, the maximum number of active warps with + the fewest blocks per multiprocessor), and in \p *minGridSize the + minimum grid size to achieve the maximum occupancy. + + If \p blockSizeLimit is 0, the configurator will use the maximum + block size permitted by the device / function instead. + + If per-block dynamic shared memory allocation is not needed, the + user should leave both \p blockSizeToDynamicSMemSize and \p + dynamicSMemSize as 0. + + If per-block dynamic shared memory allocation is needed, then if + the dynamic shared memory size is constant regardless of block + size, the size should be passed through \p dynamicSMemSize, and \p + blockSizeToDynamicSMemSize should be NULL. + + Otherwise, if the per-block dynamic shared memory size varies with + different block sizes, the user needs to provide a unary function + through \p blockSizeToDynamicSMemSize that computes the dynamic + shared memory needed by \p func for any given block size. \p + dynamicSMemSize is ignored. An example signature is: + + \code + // Take block size, returns dynamic shared memory needed + size_t blockToSmem(int blockSize); + \endcode + + \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy + \param blockSize - Returned maximum block size that can achieve the maximum occupancy + \param func - Kernel for which launch configuration is calculated + \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size + \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes + \param blockSizeLimit - The maximum block size \p func is designed to handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cudaOccupancyMaxPotentialBlockSize*/ + fn cuOccupancyMaxPotentialBlockSize( + minGridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize, + dynamicSMemSize: usize, + blockSizeLimit: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Suggest a launch configuration with reasonable occupancy + + An extended version of ::cuOccupancyMaxPotentialBlockSize. 
In + addition to arguments passed to ::cuOccupancyMaxPotentialBlockSize, + ::cuOccupancyMaxPotentialBlockSizeWithFlags also takes a \p Flags + parameter. + + The \p Flags parameter controls how special cases are handled. The + valid flags are: + + - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as + ::cuOccupancyMaxPotentialBlockSize; + + - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the + default behavior on platform where global caching affects + occupancy. On such platforms, the launch configurations that + produces maximal occupancy might not support global + caching. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE + guarantees that the the produced launch configuration is global + caching compatible at a potential cost of occupancy. More information + can be found about this feature in the "Unified L1/Texture Cache" + section of the Maxwell tuning guide. + + \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy + \param blockSize - Returned maximum block size that can achieve the maximum occupancy + \param func - Kernel for which launch configuration is calculated + \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size + \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes + \param blockSizeLimit - The maximum block size \p func is designed to handle + \param flags - Options + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cudaOccupancyMaxPotentialBlockSizeWithFlags*/ + fn cuOccupancyMaxPotentialBlockSizeWithFlags( + minGridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize, + dynamicSMemSize: usize, + blockSizeLimit: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM + + Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + + \param dynamicSmemSize - Returned maximum dynamic shared memory + \param func - Kernel function for which occupancy is calculated + \param numBlocks - Number of blocks to fit on SM + \param blockSize - Size of the blocks + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr*/ + fn cuOccupancyAvailableDynamicSMemPerBlock( + dynamicSmemSize: *mut usize, + func: cuda_types::CUfunction, + numBlocks: ::core::ffi::c_int, + blockSize: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Given the kernel function (\p func) and launch configuration + (\p config), return the maximum cluster size in \p *clusterSize. + + The cluster dimensions in \p config are ignored. If func has a required + cluster size set (see ::cudaFuncGetAttributes / ::cuFuncGetAttribute),\p + *clusterSize will reflect the required cluster size. + + By default this function will always return a value that's portable on + future hardware. A higher value may be returned if the kernel function + allows non-portable cluster sizes. + + This function will respect the compile time launch bounds. 
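+
+ A minimal query sketch (illustrative only, not part of the original header text;
+ the block shape below is a placeholder and error checking is omitted):
+
+ \code
+CUlaunchConfig cfg;
+memset(&cfg, 0, sizeof(cfg));
+cfg.gridDimX = 1;    cfg.gridDimY = 1;  cfg.gridDimZ = 1;
+cfg.blockDimX = 128; cfg.blockDimY = 1; cfg.blockDimZ = 1;  // cluster dims in cfg are ignored
+int clusterSize = 0;
+cuOccupancyMaxPotentialClusterSize(&clusterSize, func, &cfg);
+ \endcode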
+ + \param clusterSize - Returned maximum cluster size that can be launched + for the given kernel function and launch configuration + \param func - Kernel function for which maximum cluster + size is calculated + \param config - Launch configuration for the given kernel function + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cudaFuncGetAttributes, + ::cuFuncGetAttribute*/ + fn cuOccupancyMaxPotentialClusterSize( + clusterSize: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + config: *const cuda_types::CUlaunchConfig, + ) -> cuda_types::CUresult; + /** \brief Given the kernel function (\p func) and launch configuration + (\p config), return the maximum number of clusters that could co-exist + on the target device in \p *numClusters. + + If the function has required cluster size already set (see + ::cudaFuncGetAttributes / ::cuFuncGetAttribute), the cluster size + from config must either be unspecified or match the required size. + Without required sizes, the cluster size must be specified in config, + else the function will return an error. + + Note that various attributes of the kernel function may affect occupancy + calculation. Runtime environment may affect how the hardware schedules + the clusters, so the calculated occupancy is not guaranteed to be achievable. + + \param numClusters - Returned maximum number of clusters that + could co-exist on the target device + \param func - Kernel function for which maximum number + of clusters are calculated + \param config - Launch configuration for the given kernel function + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_CLUSTER_SIZE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cudaFuncGetAttributes, + ::cuFuncGetAttribute*/ + fn cuOccupancyMaxActiveClusters( + numClusters: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + config: *const cuda_types::CUlaunchConfig, + ) -> cuda_types::CUresult; + /** \brief Binds an array as a texture reference + + \deprecated + + Binds the CUDA array \p hArray to the texture reference \p hTexRef. Any + previous address or CUDA array state associated with the texture reference + is superseded by this function. \p Flags must be set to + ::CU_TRSA_OVERRIDE_FORMAT. Any CUDA array previously bound to \p hTexRef is + unbound. + + \param hTexRef - Texture reference to bind + \param hArray - Array to bind + \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetArray( + hTexRef: cuda_types::CUtexref, + hArray: cuda_types::CUarray, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Binds a mipmapped array to a texture reference + + \deprecated + + Binds the CUDA mipmapped array \p hMipmappedArray to the texture reference \p hTexRef. 
+ Any previous address or CUDA array state associated with the texture reference + is superseded by this function. \p Flags must be set to ::CU_TRSA_OVERRIDE_FORMAT. + Any CUDA array previously bound to \p hTexRef is unbound. + + \param hTexRef - Texture reference to bind + \param hMipmappedArray - Mipmapped array to bind + \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetMipmappedArray( + hTexRef: cuda_types::CUtexref, + hMipmappedArray: cuda_types::CUmipmappedArray, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Binds an address as a texture reference + + \deprecated + + Binds a linear address range to the texture reference \p hTexRef. Any + previous address or CUDA array state associated with the texture reference + is superseded by this function. Any memory previously bound to \p hTexRef + is unbound. + + Since the hardware enforces an alignment requirement on texture base + addresses, ::cuTexRefSetAddress() passes back a byte offset in + \p *ByteOffset that must be applied to texture fetches in order to read from + the desired memory. This offset must be divided by the texel size and + passed to kernels that read from the texture so they can be applied to the + ::tex1Dfetch() function. + + If the device memory pointer was returned from ::cuMemAlloc(), the offset + is guaranteed to be 0 and NULL may be passed as the \p ByteOffset parameter. + + The total number of elements (or texels) in the linear address range + cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. + The number of elements is computed as (\p bytes / bytesPerElement), + where bytesPerElement is determined from the data format and number of + components set using ::cuTexRefSetFormat(). + + \param ByteOffset - Returned byte offset + \param hTexRef - Texture reference to bind + \param dptr - Device pointer to bind + \param bytes - Size of memory to bind in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetAddress_v2( + ByteOffset: *mut usize, + hTexRef: cuda_types::CUtexref, + dptr: cuda_types::CUdeviceptr, + bytes: usize, + ) -> cuda_types::CUresult; + /** \brief Binds an address as a 2D texture reference + + \deprecated + + Binds a linear address range to the texture reference \p hTexRef. Any + previous address or CUDA array state associated with the texture reference + is superseded by this function. Any memory previously bound to \p hTexRef + is unbound. + + Using a ::tex2D() function inside a kernel requires a call to either + ::cuTexRefSetArray() to bind the corresponding texture reference to an + array, or ::cuTexRefSetAddress2D() to bind the texture reference to linear + memory. 
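+
+ A minimal sketch binding pitched linear memory (illustrative only, not part of
+ the original header text; dptr and pitch are assumed to come from
+ ::cuMemAllocPitch, and error checking is omitted):
+
+ \code
+CUDA_ARRAY_DESCRIPTOR desc;
+desc.Format = CU_AD_FORMAT_FLOAT;
+desc.NumChannels = 1;
+desc.Width = width;
+desc.Height = height;
+cuTexRefSetAddress2D(texRef, &desc, dptr, pitch);   // pitch in bytes
+ \endcode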
+ + Function calls to ::cuTexRefSetFormat() cannot follow calls to + ::cuTexRefSetAddress2D() for the same texture reference. + + It is required that \p dptr be aligned to the appropriate hardware-specific + texture alignment. You can query this value using the device attribute + ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. If an unaligned \p dptr is + supplied, ::CUDA_ERROR_INVALID_VALUE is returned. + + \p Pitch has to be aligned to the hardware-specific texture pitch alignment. + This value can be queried using the device attribute + ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. If an unaligned \p Pitch is + supplied, ::CUDA_ERROR_INVALID_VALUE is returned. + + Width and Height, which are specified in elements (or texels), cannot exceed + ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and + ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. + \p Pitch, which is specified in bytes, cannot exceed + ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. + + \param hTexRef - Texture reference to bind + \param desc - Descriptor of CUDA array + \param dptr - Device pointer to bind + \param Pitch - Line pitch in bytes + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetAddress2D_v3( + hTexRef: cuda_types::CUtexref, + desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR, + dptr: cuda_types::CUdeviceptr, + Pitch: usize, + ) -> cuda_types::CUresult; + /** \brief Sets the format for a texture reference + + \deprecated + + Specifies the format of the data to be read by the texture reference + \p hTexRef. \p fmt and \p NumPackedComponents are exactly analogous to the + ::Format and ::NumChannels members of the ::CUDA_ARRAY_DESCRIPTOR structure: + They specify the format of each component and the number of components per + array element. + + \param hTexRef - Texture reference + \param fmt - Format to set + \param NumPackedComponents - Number of components per array element + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, + ::cudaCreateChannelDesc*/ + fn cuTexRefSetFormat( + hTexRef: cuda_types::CUtexref, + fmt: cuda_types::CUarray_format, + NumPackedComponents: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Sets the addressing mode for a texture reference + + \deprecated + + Specifies the addressing mode \p am for the given dimension \p dim of the + texture reference \p hTexRef. If \p dim is zero, the addressing mode is + applied to the first parameter of the functions used to fetch from the + texture; if \p dim is 1, the second, and so on. 
::CUaddress_mode is defined + as: + \code +typedef enum CUaddress_mode_enum { +CU_TR_ADDRESS_MODE_WRAP = 0, +CU_TR_ADDRESS_MODE_CLAMP = 1, +CU_TR_ADDRESS_MODE_MIRROR = 2, +CU_TR_ADDRESS_MODE_BORDER = 3 +} CUaddress_mode; + \endcode + + Note that this call has no effect if \p hTexRef is bound to linear memory. + Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES, is not set, the only + supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. + + \param hTexRef - Texture reference + \param dim - Dimension + \param am - Addressing mode to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetAddressMode( + hTexRef: cuda_types::CUtexref, + dim: ::core::ffi::c_int, + am: cuda_types::CUaddress_mode, + ) -> cuda_types::CUresult; + /** \brief Sets the filtering mode for a texture reference + + \deprecated + + Specifies the filtering mode \p fm to be used when reading memory through + the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: + + \code +typedef enum CUfilter_mode_enum { +CU_TR_FILTER_MODE_POINT = 0, +CU_TR_FILTER_MODE_LINEAR = 1 +} CUfilter_mode; + \endcode + + Note that this call has no effect if \p hTexRef is bound to linear memory. + + \param hTexRef - Texture reference + \param fm - Filtering mode to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetFilterMode( + hTexRef: cuda_types::CUtexref, + fm: cuda_types::CUfilter_mode, + ) -> cuda_types::CUresult; + /** \brief Sets the mipmap filtering mode for a texture reference + + \deprecated + + Specifies the mipmap filtering mode \p fm to be used when reading memory through + the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: + + \code +typedef enum CUfilter_mode_enum { +CU_TR_FILTER_MODE_POINT = 0, +CU_TR_FILTER_MODE_LINEAR = 1 +} CUfilter_mode; + \endcode + + Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + + \param hTexRef - Texture reference + \param fm - Filtering mode to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetMipmapFilterMode( + hTexRef: cuda_types::CUtexref, + fm: cuda_types::CUfilter_mode, + ) -> cuda_types::CUresult; + /** \brief Sets the mipmap level bias for a texture reference + + \deprecated + + Specifies the mipmap level bias \p bias to be added to the specified mipmap level when + reading memory through the texture reference \p hTexRef. 
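+
+ A typical configuration sketch for a mipmapped texture reference (illustrative
+ only, not part of the original header text; texRef and mipArray are placeholders
+ and error checking is omitted):
+
+ \code
+cuTexRefSetMipmappedArray(texRef, mipArray, CU_TRSA_OVERRIDE_FORMAT);
+cuTexRefSetFilterMode(texRef, CU_TR_FILTER_MODE_LINEAR);
+cuTexRefSetMipmapFilterMode(texRef, CU_TR_FILTER_MODE_LINEAR);
+cuTexRefSetMipmapLevelBias(texRef, 0.5f);
+cuTexRefSetMipmapLevelClamp(texRef, 0.0f, 8.0f);
+ \endcode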
+ + Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + + \param hTexRef - Texture reference + \param bias - Mipmap level bias + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetMipmapLevelBias( + hTexRef: cuda_types::CUtexref, + bias: f32, + ) -> cuda_types::CUresult; + /** \brief Sets the mipmap min/max mipmap level clamps for a texture reference + + \deprecated + + Specifies the min/max mipmap level clamps, \p minMipmapLevelClamp and \p maxMipmapLevelClamp + respectively, to be used when reading memory through the texture reference + \p hTexRef. + + Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + + \param hTexRef - Texture reference + \param minMipmapLevelClamp - Mipmap min level clamp + \param maxMipmapLevelClamp - Mipmap max level clamp + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetMipmapLevelClamp( + hTexRef: cuda_types::CUtexref, + minMipmapLevelClamp: f32, + maxMipmapLevelClamp: f32, + ) -> cuda_types::CUresult; + /** \brief Sets the maximum anisotropy for a texture reference + + \deprecated + + Specifies the maximum anisotropy \p maxAniso to be used when reading memory through + the texture reference \p hTexRef. + + Note that this call has no effect if \p hTexRef is bound to linear memory. + + \param hTexRef - Texture reference + \param maxAniso - Maximum anisotropy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetMaxAnisotropy( + hTexRef: cuda_types::CUtexref, + maxAniso: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Sets the border color for a texture reference + + \deprecated + + Specifies the value of the RGBA color via the \p pBorderColor to the texture reference + \p hTexRef. The color value supports only float type and holds color components in + the following sequence: + pBorderColor[0] holds 'R' component + pBorderColor[1] holds 'G' component + pBorderColor[2] holds 'B' component + pBorderColor[3] holds 'A' component + + Note that the color values can be set only when the Address mode is set to + CU_TR_ADDRESS_MODE_BORDER using ::cuTexRefSetAddressMode. + Applications using integer border color values have to "reinterpret_cast" their values to float. 
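+
+ A minimal sketch (illustrative only, not part of the original header text;
+ texRef is a placeholder and error checking is omitted):
+
+ \code
+float borderColor[4] = { 0.0f, 0.0f, 0.0f, 1.0f };   // opaque black
+cuTexRefSetAddressMode(texRef, 0, CU_TR_ADDRESS_MODE_BORDER);
+cuTexRefSetBorderColor(texRef, borderColor);
+ \endcode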
+ + \param hTexRef - Texture reference + \param pBorderColor - RGBA color + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddressMode, + ::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor*/ + fn cuTexRefSetBorderColor( + hTexRef: cuda_types::CUtexref, + pBorderColor: *mut f32, + ) -> cuda_types::CUresult; + /** \brief Sets the flags for a texture reference + + \deprecated + + Specifies optional flags via \p Flags to specify the behavior of data + returned through the texture reference \p hTexRef. The valid flags are: + + - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of + having the texture promote integer data to floating point data in the + range [0, 1]. Note that texture with 32-bit integer format + would not be promoted, regardless of whether or not this + flag is specified; + - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the + default behavior of having the texture coordinates range + from [0, Dim) where Dim is the width or height of the CUDA + array. Instead, the texture coordinates [0, 1.0) reference + the entire breadth of the array dimension; + - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear + filtering optimizations. Trilinear optimizations improve texture filtering + performance by allowing bilinear filtering on textures in scenarios where + it can closely approximate the expected results. + + \param hTexRef - Texture reference + \param Flags - Optional flags to set + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefSetFlags( + hTexRef: cuda_types::CUtexref, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Gets the address associated with a texture reference + + \deprecated + + Returns in \p *pdptr the base address bound to the texture reference + \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + is not bound to any device memory range. + + \param pdptr - Returned device address + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetAddress_v2( + pdptr: *mut cuda_types::CUdeviceptr, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the array bound to a texture reference + + \deprecated + + Returns in \p *phArray the CUDA array bound to the texture reference + \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + is not bound to any CUDA array. 
+ + \param phArray - Returned array + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetArray( + phArray: *mut cuda_types::CUarray, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the mipmapped array bound to a texture reference + + \deprecated + + Returns in \p *phMipmappedArray the CUDA mipmapped array bound to the texture + reference \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + is not bound to any CUDA mipmapped array. + + \param phMipmappedArray - Returned mipmapped array + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetMipmappedArray( + phMipmappedArray: *mut cuda_types::CUmipmappedArray, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the addressing mode used by a texture reference + + \deprecated + + Returns in \p *pam the addressing mode corresponding to the + dimension \p dim of the texture reference \p hTexRef. Currently, the only + valid value for \p dim are 0 and 1. + + \param pam - Returned addressing mode + \param hTexRef - Texture reference + \param dim - Dimension + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetAddressMode( + pam: *mut cuda_types::CUaddress_mode, + hTexRef: cuda_types::CUtexref, + dim: ::core::ffi::c_int, + ) -> cuda_types::CUresult; + /** \brief Gets the filter-mode used by a texture reference + + \deprecated + + Returns in \p *pfm the filtering mode of the texture reference + \p hTexRef. 
+ + \param pfm - Returned filtering mode + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetFilterMode( + pfm: *mut cuda_types::CUfilter_mode, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the format used by a texture reference + + \deprecated + + Returns in \p *pFormat and \p *pNumChannels the format and number + of components of the CUDA array bound to the texture reference \p hTexRef. + If \p pFormat or \p pNumChannels is NULL, it will be ignored. + + \param pFormat - Returned format + \param pNumChannels - Returned number of components + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags*/ + fn cuTexRefGetFormat( + pFormat: *mut cuda_types::CUarray_format, + pNumChannels: *mut ::core::ffi::c_int, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the mipmap filtering mode for a texture reference + + \deprecated + + Returns the mipmap filtering mode in \p pfm that's used when reading memory through + the texture reference \p hTexRef. + + \param pfm - Returned mipmap filtering mode + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetMipmapFilterMode( + pfm: *mut cuda_types::CUfilter_mode, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the mipmap level bias for a texture reference + + \deprecated + + Returns the mipmap level bias in \p pBias that's added to the specified mipmap + level when reading memory through the texture reference \p hTexRef. 
+ + \param pbias - Returned mipmap level bias + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetMipmapLevelBias( + pbias: *mut f32, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the min/max mipmap level clamps for a texture reference + + \deprecated + + Returns the min/max mipmap level clamps in \p pminMipmapLevelClamp and \p pmaxMipmapLevelClamp + that's used when reading memory through the texture reference \p hTexRef. + + \param pminMipmapLevelClamp - Returned mipmap min level clamp + \param pmaxMipmapLevelClamp - Returned mipmap max level clamp + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetMipmapLevelClamp( + pminMipmapLevelClamp: *mut f32, + pmaxMipmapLevelClamp: *mut f32, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the maximum anisotropy for a texture reference + + \deprecated + + Returns the maximum anisotropy in \p pmaxAniso that's used when reading memory through + the texture reference \p hTexRef. + + \param pmaxAniso - Returned maximum anisotropy + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat*/ + fn cuTexRefGetMaxAnisotropy( + pmaxAniso: *mut ::core::ffi::c_int, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the border color used by a texture reference + + \deprecated + + Returns in \p pBorderColor, values of the RGBA color used by + the texture reference \p hTexRef. 
+ The color value is of type float and holds color components in + the following sequence: + pBorderColor[0] holds 'R' component + pBorderColor[1] holds 'G' component + pBorderColor[2] holds 'B' component + pBorderColor[3] holds 'A' component + + \param hTexRef - Texture reference + \param pBorderColor - Returned Type and Value of RGBA color + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddressMode, + ::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor*/ + fn cuTexRefGetBorderColor( + pBorderColor: *mut f32, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Gets the flags used by a texture reference + + \deprecated + + Returns in \p *pFlags the flags of the texture reference \p hTexRef. + + \param pFlags - Returned flags + \param hTexRef - Texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefSetAddress, + ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + ::cuTexRefGetFilterMode, ::cuTexRefGetFormat*/ + fn cuTexRefGetFlags( + pFlags: *mut ::core::ffi::c_uint, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + /** \brief Creates a texture reference + + \deprecated + + Creates a texture reference and returns its handle in \p *pTexRef. Once + created, the application must call ::cuTexRefSetArray() or + ::cuTexRefSetAddress() to associate the reference with allocated memory. + Other texture reference functions are used to specify the format and + interpretation (addressing, filtering, etc.) to be used when the memory is + read through this texture reference. + + \param pTexRef - Returned texture reference + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefDestroy*/ + fn cuTexRefCreate(pTexRef: *mut cuda_types::CUtexref) -> cuda_types::CUresult; + /** \brief Destroys a texture reference + + \deprecated + + Destroys the texture reference specified by \p hTexRef. + + \param hTexRef - Texture reference to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuTexRefCreate*/ + fn cuTexRefDestroy(hTexRef: cuda_types::CUtexref) -> cuda_types::CUresult; + /** \brief Sets the CUDA array for a surface reference. + + \deprecated + + Sets the CUDA array \p hArray to be read and written by the surface reference + \p hSurfRef. Any previous CUDA array state associated with the surface + reference is superseded by this function. \p Flags must be set to 0. + The ::CUDA_ARRAY3D_SURFACE_LDST flag must have been set for the CUDA array. + Any CUDA array previously bound to \p hSurfRef is unbound. 
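+
+ A minimal sketch (illustrative only, not part of the original header text;
+ hModule, hArray and the surface name are placeholders, error checking omitted):
+
+ \code
+// The array must have been created with the ::CUDA_ARRAY3D_SURFACE_LDST flag.
+CUsurfref surfRef;
+cuModuleGetSurfRef(&surfRef, hModule, "mySurface");
+cuSurfRefSetArray(surfRef, hArray, 0);
+ \endcode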
+ + \param hSurfRef - Surface reference handle + \param hArray - CUDA array handle + \param Flags - set to 0 + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuModuleGetSurfRef, + ::cuSurfRefGetArray*/ + fn cuSurfRefSetArray( + hSurfRef: cuda_types::CUsurfref, + hArray: cuda_types::CUarray, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Passes back the CUDA array bound to a surface reference. + + \deprecated + + Returns in \p *phArray the CUDA array bound to the surface reference + \p hSurfRef, or returns ::CUDA_ERROR_INVALID_VALUE if the surface reference + is not bound to any CUDA array. + + \param phArray - Surface reference handle + \param hSurfRef - Surface reference handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray*/ + fn cuSurfRefGetArray( + phArray: *mut cuda_types::CUarray, + hSurfRef: cuda_types::CUsurfref, + ) -> cuda_types::CUresult; + /** \brief Creates a texture object + + Creates a texture object and returns it in \p pTexObject. \p pResDesc describes + the data to texture from. \p pTexDesc describes how the data should be sampled. + \p pResViewDesc is an optional argument that specifies an alternate format for + the data described by \p pResDesc, and also describes the subresource region + to restrict access to when texturing. \p pResViewDesc can only be specified if + the type of resource is a CUDA array or a CUDA mipmapped array. + + Texture objects are only supported on devices of compute capability 3.0 or higher. + Additionally, a texture object is an opaque value, and, as such, should only be + accessed through CUDA API calls. + + The ::CUDA_RESOURCE_DESC structure is defined as: + \code +typedef struct CUDA_RESOURCE_DESC_st +{ +CUresourcetype resType; + +union { +struct { +CUarray hArray; +} array; +struct { +CUmipmappedArray hMipmappedArray; +} mipmap; +struct { +CUdeviceptr devPtr; +CUarray_format format; +unsigned int numChannels; +size_t sizeInBytes; +} linear; +struct { +CUdeviceptr devPtr; +CUarray_format format; +unsigned int numChannels; +size_t width; +size_t height; +size_t pitchInBytes; +} pitch2D; +} res; + +unsigned int flags; +} CUDA_RESOURCE_DESC; + + \endcode + where: + - ::CUDA_RESOURCE_DESC::resType specifies the type of resource to texture from. + CUresourceType is defined as: + \code +typedef enum CUresourcetype_enum { +CU_RESOURCE_TYPE_ARRAY = 0x00, +CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, +CU_RESOURCE_TYPE_LINEAR = 0x02, +CU_RESOURCE_TYPE_PITCH2D = 0x03 +} CUresourcetype; + \endcode + + \par + If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_ARRAY, ::CUDA_RESOURCE_DESC::res::array::hArray + must be set to a valid CUDA array handle. + + \par + If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, ::CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray + must be set to a valid CUDA mipmapped array handle. + + \par + If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_LINEAR, ::CUDA_RESOURCE_DESC::res::linear::devPtr + must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. + ::CUDA_RESOURCE_DESC::res::linear::format and ::CUDA_RESOURCE_DESC::res::linear::numChannels + describe the format of each component and the number of components per array element. 
::CUDA_RESOURCE_DESC::res::linear::sizeInBytes + specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed + ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)). + + \par + If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_PITCH2D, ::CUDA_RESOURCE_DESC::res::pitch2D::devPtr + must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. + ::CUDA_RESOURCE_DESC::res::pitch2D::format and ::CUDA_RESOURCE_DESC::res::pitch2D::numChannels + describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::pitch2D::width + and ::CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed + ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. + ::CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to + ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. + + - ::flags must be set to zero. + + + The ::CUDA_TEXTURE_DESC struct is defined as + \code +typedef struct CUDA_TEXTURE_DESC_st { +CUaddress_mode addressMode[3]; +CUfilter_mode filterMode; +unsigned int flags; +unsigned int maxAnisotropy; +CUfilter_mode mipmapFilterMode; +float mipmapLevelBias; +float minMipmapLevelClamp; +float maxMipmapLevelClamp; +} CUDA_TEXTURE_DESC; + \endcode + where + - ::CUDA_TEXTURE_DESC::addressMode specifies the addressing mode for each dimension of the texture data. ::CUaddress_mode is defined as: + \code +typedef enum CUaddress_mode_enum { +CU_TR_ADDRESS_MODE_WRAP = 0, +CU_TR_ADDRESS_MODE_CLAMP = 1, +CU_TR_ADDRESS_MODE_MIRROR = 2, +CU_TR_ADDRESS_MODE_BORDER = 3 +} CUaddress_mode; + \endcode + This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES + is not set, the only supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. + + - ::CUDA_TEXTURE_DESC::filterMode specifies the filtering mode to be used when fetching from the texture. CUfilter_mode is defined as: + \code +typedef enum CUfilter_mode_enum { +CU_TR_FILTER_MODE_POINT = 0, +CU_TR_FILTER_MODE_LINEAR = 1 +} CUfilter_mode; + \endcode + This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. + + - ::CUDA_TEXTURE_DESC::flags can be any combination of the following: + - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of + having the texture promote integer data to floating point data in the + range [0, 1]. Note that texture with 32-bit integer format would not be + promoted, regardless of whether or not this flag is specified. + - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior + of having the texture coordinates range from [0, Dim) where Dim is the + width or height of the CUDA array. Instead, the texture coordinates + [0, 1.0) reference the entire breadth of the array dimension; Note that + for CUDA mipmapped arrays, this flag has to be set. + - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear + filtering optimizations. Trilinear optimizations improve texture filtering + performance by allowing bilinear filtering on textures in scenarios where + it can closely approximate the expected results. 
+ - ::CU_TRSF_SEAMLESS_CUBEMAP, which enables seamless cube map filtering. + This flag can only be specified if the underlying resource is a CUDA array + or a CUDA mipmapped array that was created with the flag ::CUDA_ARRAY3D_CUBEMAP. + When seamless cube map filtering is enabled, texture address modes specified + by ::CUDA_TEXTURE_DESC::addressMode are ignored. Instead, if the ::CUDA_TEXTURE_DESC::filterMode + is set to ::CU_TR_FILTER_MODE_POINT the address mode ::CU_TR_ADDRESS_MODE_CLAMP + will be applied for all dimensions. If the ::CUDA_TEXTURE_DESC::filterMode is + set to ::CU_TR_FILTER_MODE_LINEAR seamless cube map filtering will be performed + when sampling along the cube face borders. + + - ::CUDA_TEXTURE_DESC::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be + clamped to the range [1,16]. + + - ::CUDA_TEXTURE_DESC::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels. + + - ::CUDA_TEXTURE_DESC::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level. + + - ::CUDA_TEXTURE_DESC::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to. + + - ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to. + + + The ::CUDA_RESOURCE_VIEW_DESC struct is defined as + \code +typedef struct CUDA_RESOURCE_VIEW_DESC_st +{ +CUresourceViewFormat format; +size_t width; +size_t height; +size_t depth; +unsigned int firstMipmapLevel; +unsigned int lastMipmapLevel; +unsigned int firstLayer; +unsigned int lastLayer; +} CUDA_RESOURCE_VIEW_DESC; + \endcode + where: + - ::CUDA_RESOURCE_VIEW_DESC::format specifies how the data contained in the CUDA array or CUDA mipmapped array should + be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block + compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base of format ::CU_AD_FORMAT_UNSIGNED_INT32. + with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have + a format of ::CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base + format but with 4 channels. + + - ::CUDA_RESOURCE_VIEW_DESC::width specifies the new width of the texture data. If the resource view format is a block + compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, + this value has to be equal to that of the original resource. + + - ::CUDA_RESOURCE_VIEW_DESC::height specifies the new height of the texture data. If the resource view format is a block + compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, + this value has to be equal to that of the original resource. + + - ::CUDA_RESOURCE_VIEW_DESC::depth specifies the new depth of the texture data. This value has to be equal to that of the + original resource. + + - ::CUDA_RESOURCE_VIEW_DESC::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. + For non-mipmapped resources, this value has to be zero.::CUDA_TEXTURE_DESC::minMipmapLevelClamp and ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp + will be relative to this value. 
For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, + then the actual minimum mipmap level clamp will be 3.2. + + - ::CUDA_RESOURCE_VIEW_DESC::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value + has to be zero. + + - ::CUDA_RESOURCE_VIEW_DESC::firstLayer specifies the first layer index for layered textures. This will be the new layer zero. + For non-layered resources, this value has to be zero. + + - ::CUDA_RESOURCE_VIEW_DESC::lastLayer specifies the last layer index for layered textures. For non-layered resources, + this value has to be zero. + + + \param pTexObject - Texture object to create + \param pResDesc - Resource descriptor + \param pTexDesc - Texture descriptor + \param pResViewDesc - Resource view descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexObjectDestroy, + ::cudaCreateTextureObject*/ + fn cuTexObjectCreate( + pTexObject: *mut cuda_types::CUtexObject, + pResDesc: *const cuda_types::CUDA_RESOURCE_DESC, + pTexDesc: *const cuda_types::CUDA_TEXTURE_DESC, + pResViewDesc: *const cuda_types::CUDA_RESOURCE_VIEW_DESC, + ) -> cuda_types::CUresult; + /** \brief Destroys a texture object + + Destroys the texture object specified by \p texObject. + + \param texObject - Texture object to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexObjectCreate, + ::cudaDestroyTextureObject*/ + fn cuTexObjectDestroy(texObject: cuda_types::CUtexObject) -> cuda_types::CUresult; + /** \brief Returns a texture object's resource descriptor + + Returns the resource descriptor for the texture object specified by \p texObject. + + \param pResDesc - Resource descriptor + \param texObject - Texture object + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexObjectCreate, + ::cudaGetTextureObjectResourceDesc,*/ + fn cuTexObjectGetResourceDesc( + pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC, + texObject: cuda_types::CUtexObject, + ) -> cuda_types::CUresult; + /** \brief Returns a texture object's texture descriptor + + Returns the texture descriptor for the texture object specified by \p texObject. + + \param pTexDesc - Texture descriptor + \param texObject - Texture object + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexObjectCreate, + ::cudaGetTextureObjectTextureDesc*/ + fn cuTexObjectGetTextureDesc( + pTexDesc: *mut cuda_types::CUDA_TEXTURE_DESC, + texObject: cuda_types::CUtexObject, + ) -> cuda_types::CUresult; + /** \brief Returns a texture object's resource view descriptor + + Returns the resource view descriptor for the texture object specified by \p texObject. + If no resource view was set for \p texObject, the ::CUDA_ERROR_INVALID_VALUE is returned. 
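+
+ A minimal round-trip sketch (illustrative only, not part of the original header
+ text; dptr and nElems are placeholders and error checking is omitted):
+
+ \code
+CUDA_RESOURCE_DESC resDesc;
+memset(&resDesc, 0, sizeof(resDesc));
+resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
+resDesc.res.linear.devPtr = dptr;
+resDesc.res.linear.format = CU_AD_FORMAT_FLOAT;
+resDesc.res.linear.numChannels = 1;
+resDesc.res.linear.sizeInBytes = nElems * sizeof(float);
+
+CUDA_TEXTURE_DESC texDesc;
+memset(&texDesc, 0, sizeof(texDesc));
+texDesc.filterMode = CU_TR_FILTER_MODE_POINT;
+
+CUtexObject tex;
+cuTexObjectCreate(&tex, &resDesc, &texDesc, NULL);   // no resource view set
+
+CUDA_RESOURCE_VIEW_DESC viewDesc;
+// Fails with ::CUDA_ERROR_INVALID_VALUE because no resource view was set above.
+cuTexObjectGetResourceViewDesc(&viewDesc, tex);
+cuTexObjectDestroy(tex);
+ \endcode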
+ + \param pResViewDesc - Resource view descriptor + \param texObject - Texture object + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTexObjectCreate, + ::cudaGetTextureObjectResourceViewDesc*/ + fn cuTexObjectGetResourceViewDesc( + pResViewDesc: *mut cuda_types::CUDA_RESOURCE_VIEW_DESC, + texObject: cuda_types::CUtexObject, + ) -> cuda_types::CUresult; + /** \brief Creates a surface object + + Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes + the data to perform surface load/stores on. ::CUDA_RESOURCE_DESC::resType must be + ::CU_RESOURCE_TYPE_ARRAY and ::CUDA_RESOURCE_DESC::res::array::hArray + must be set to a valid CUDA array handle. ::CUDA_RESOURCE_DESC::flags must be set to zero. + + Surface objects are only supported on devices of compute capability 3.0 or higher. + Additionally, a surface object is an opaque value, and, as such, should only be + accessed through CUDA API calls. + + \param pSurfObject - Surface object to create + \param pResDesc - Resource descriptor + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuSurfObjectDestroy, + ::cudaCreateSurfaceObject*/ + fn cuSurfObjectCreate( + pSurfObject: *mut cuda_types::CUsurfObject, + pResDesc: *const cuda_types::CUDA_RESOURCE_DESC, + ) -> cuda_types::CUresult; + /** \brief Destroys a surface object + + Destroys the surface object specified by \p surfObject. + + \param surfObject - Surface object to destroy + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuSurfObjectCreate, + ::cudaDestroySurfaceObject*/ + fn cuSurfObjectDestroy(surfObject: cuda_types::CUsurfObject) -> cuda_types::CUresult; + /** \brief Returns a surface object's resource descriptor + + Returns the resource descriptor for the surface object specified by \p surfObject. + + \param pResDesc - Resource descriptor + \param surfObject - Surface object + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuSurfObjectCreate, + ::cudaGetSurfaceObjectResourceDesc*/ + fn cuSurfObjectGetResourceDesc( + pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC, + surfObject: cuda_types::CUsurfObject, + ) -> cuda_types::CUresult; + /** \brief Create a tensor map descriptor object representing tiled memory region + + Creates a descriptor for Tensor Memory Access (TMA) object specified + by the parameters describing a tiled region and returns it in \p tensorMap. + + Tensor map objects are only supported on devices of compute capability 9.0 or higher. + Additionally, a tensor map object is an opaque value, and, as such, should only be + accessed through CUDA API calls. + + The parameters passed are bound to the following requirements: + + - \p tensorMap address must be aligned to 64 bytes. 
+ + - \p tensorDataType has to be an enum from ::CUtensorMapDataType which is defined as: + \code +typedef enum CUtensorMapDataType_enum { +CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, // 1 byte +CU_TENSOR_MAP_DATA_TYPE_UINT16, // 2 bytes +CU_TENSOR_MAP_DATA_TYPE_UINT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_INT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_UINT64, // 8 bytes +CU_TENSOR_MAP_DATA_TYPE_INT64, // 8 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT16, // 2 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT64, // 8 bytes +CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, // 2 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ // 4 bytes +} CUtensorMapDataType; + \endcode + + - \p tensorRank must be non-zero and less than or equal to the maximum supported dimensionality of 5. If \p interleave is not + ::CU_TENSOR_MAP_INTERLEAVE_NONE, then \p tensorRank must additionally be greater than or equal to 3. + + - \p globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when \p interleave is + ::CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise. + + - \p globalDim array, which specifies tensor size of each of the \p tensorRank dimensions, must be non-zero and less than or + equal to 2^32. + + - \p globalStrides array, which specifies tensor stride of each of the lower \p tensorRank - 1 dimensions in bytes, must be a + multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B. + Each following dimension specified includes previous dimension stride: + \code +globalStrides[0] = globalDim[0] * elementSizeInBytes(tensorDataType) + padding[0]; +for (i = 1; i < tensorRank - 1; i++) +globalStrides[i] = globalStrides[i – 1] * (globalDim[i] + padding[i]); +assert(globalStrides[i] >= globalDim[i]); + \endcode + + - \p boxDim array, which specifies number of elements to be traversed along each of the \p tensorRank dimensions, must be non-zero + and less than or equal to 256. + When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, { \p boxDim[0] * elementSizeInBytes( \p tensorDataType ) } must be a multiple + of 16 bytes. + + - \p elementStrides array, which specifies the iteration step along each of the \p tensorRank dimensions, must be non-zero and less + than or equal to 8. Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since + TMA doesn’t support the stride for dimension zero. + When all elements of \p elementStrides array is one, \p boxDim specifies the number of elements to load. However, if the \p elementStrides[i] + is not equal to one, then TMA loads ceil( \p boxDim[i] / \p elementStrides[i]) number of elements along i-th dimension. To load N elements along + i-th dimension, \p boxDim[i] must be set to N * \p elementStrides[i]. + + - \p interleave specifies the interleaved layout of type ::CUtensorMapInterleave, which is defined as: + \code +typedef enum CUtensorMapInterleave_enum { +CU_TENSOR_MAP_INTERLEAVE_NONE = 0, +CU_TENSOR_MAP_INTERLEAVE_16B, +CU_TENSOR_MAP_INTERLEAVE_32B +} CUtensorMapInterleave; + \endcode + TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 byte per channel or NC/16HWC16 where C16 + uses 32 bytes. 
+ When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE and \p swizzle is not ::CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension + (computed as \p boxDim[0] multiplied by element size derived from \p tensorDataType) must be less than or equal to the swizzle size. + - CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32. + - CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64. + - CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128. + + - \p swizzle, which specifies the shared memory bank swizzling pattern, has to be of type ::CUtensorMapSwizzle which is defined as: + \code +typedef enum CUtensorMapSwizzle_enum { +CU_TENSOR_MAP_SWIZZLE_NONE = 0, +CU_TENSOR_MAP_SWIZZLE_32B, +CU_TENSOR_MAP_SWIZZLE_64B, +CU_TENSOR_MAP_SWIZZLE_128B +} CUtensorMapSwizzle; + \endcode + Data are organized in a specific order in global memory; however, this may not match the order in which the application accesses data + in shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this + problem, data can be loaded to shared memory with shuffling across shared memory banks. + When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B, \p swizzle must be ::CU_TENSOR_MAP_SWIZZLE_32B. + Other interleave modes can have any swizzling pattern. + + - \p l2Promotion specifies L2 fetch size which indicates the byte granurality at which L2 requests is filled from DRAM. It must be of + type ::CUtensorMapL2promotion, which is defined as: + \code +typedef enum CUtensorMapL2promotion_enum { +CU_TENSOR_MAP_L2_PROMOTION_NONE = 0, +CU_TENSOR_MAP_L2_PROMOTION_L2_64B, +CU_TENSOR_MAP_L2_PROMOTION_L2_128B, +CU_TENSOR_MAP_L2_PROMOTION_L2_256B +} CUtensorMapL2promotion; + \endcode + + - \p oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type + ::CUtensorMapFloatOOBfill which is defined as: + \code +typedef enum CUtensorMapFloatOOBfill_enum { +CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0, +CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA +} CUtensorMapFloatOOBfill; + \endcode + Note that ::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when \p tensorDataType represents a floating-point data type. + + \param tensorMap - Tensor map object to create + \param tensorDataType - Tensor data type + \param tensorRank - Dimensionality of tensor + \param globalAddress - Starting address of memory region described by tensor + \param globalDim - Array containing tensor size (number of elements) along each of the \p tensorRank dimensions + \param globalStrides - Array containing stride size (in bytes) along each of the \p tensorRank - 1 dimensions + \param boxDim - Array containing traversal box size (number of elments) along each of the \p tensorRank dimensions. Specifies how many elements to be traversed along each tensor dimension. 
+ \param elementStrides - Array containing traversal stride in each of the \p tensorRank dimensions + \param interleave - Type of interleaved layout the tensor addresses + \param swizzle - Bank swizzling pattern inside shared memory + \param l2Promotion - L2 promotion size + \param oobFill - Indicate whether zero or special NaN constant must be used to fill out-of-bound elements + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTensorMapEncodeIm2col, + ::cuTensorMapReplaceAddress*/ + fn cuTensorMapEncodeTiled( + tensorMap: *mut cuda_types::CUtensorMap, + tensorDataType: cuda_types::CUtensorMapDataType, + tensorRank: cuda_types::cuuint32_t, + globalAddress: *mut ::core::ffi::c_void, + globalDim: *const cuda_types::cuuint64_t, + globalStrides: *const cuda_types::cuuint64_t, + boxDim: *const cuda_types::cuuint32_t, + elementStrides: *const cuda_types::cuuint32_t, + interleave: cuda_types::CUtensorMapInterleave, + swizzle: cuda_types::CUtensorMapSwizzle, + l2Promotion: cuda_types::CUtensorMapL2promotion, + oobFill: cuda_types::CUtensorMapFloatOOBfill, + ) -> cuda_types::CUresult; + /** \brief Create a tensor map descriptor object representing im2col memory region + + Creates a descriptor for Tensor Memory Access (TMA) object specified + by the parameters describing a im2col memory layout and returns it in \p tensorMap. + + Tensor map objects are only supported on devices of compute capability 9.0 or higher. + Additionally, a tensor map object is an opaque value, and, as such, should only be + accessed through CUDA API calls. + + The parameters passed are bound to the following requirements: + + - \p tensorMap address must be aligned to 64 bytes. + + - \p tensorDataType has to be an enum from ::CUtensorMapDataType which is defined as: + \code +typedef enum CUtensorMapDataType_enum { +CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, // 1 byte +CU_TENSOR_MAP_DATA_TYPE_UINT16, // 2 bytes +CU_TENSOR_MAP_DATA_TYPE_UINT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_INT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_UINT64, // 8 bytes +CU_TENSOR_MAP_DATA_TYPE_INT64, // 8 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT16, // 2 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT64, // 8 bytes +CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, // 2 bytes +CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, // 4 bytes +CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ // 4 bytes +} CUtensorMapDataType; + \endcode + + - \p tensorRank, which specifies the number of tensor dimensions, must be 3, 4, or 5. + + - \p globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when \p interleave is + ::CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise. + + - \p globalDim array, which specifies tensor size of each of the \p tensorRank dimensions, must be non-zero and less than or + equal to 2^32. + + - \p globalStrides array, which specifies tensor stride of each of the lower \p tensorRank - 1 dimensions in bytes, must be a + multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B. 
+ Each following dimension specified includes previous dimension stride: + \code +globalStrides[0] = globalDim[0] * elementSizeInBytes(tensorDataType) + padding[0]; +for (i = 1; i < tensorRank - 1; i++) +globalStrides[i] = globalStrides[i – 1] * (globalDim[i] + padding[i]); +assert(globalStrides[i] >= globalDim[i]); + \endcode + + - \p pixelBoxLowerCorner array specifies the coordinate offsets {D, H, W} of the bounding box from top/left/front corner. The number of + offsets and their precision depend on the tensor dimensionality: + - When \p tensorRank is 3, one signed offset within range [-32768, 32767] is supported. + - When \p tensorRank is 4, two signed offsets each within range [-128, 127] are supported. + - When \p tensorRank is 5, three offsets each within range [-16, 15] are supported. + + - \p pixelBoxUpperCorner array specifies the coordinate offsets {D, H, W} of the bounding box from bottom/right/back corner. The number of + offsets and their precision depend on the tensor dimensionality: + - When \p tensorRank is 3, one signed offset within range [-32768, 32767] is supported. + - When \p tensorRank is 4, two signed offsets each within range [-128, 127] are supported. + - When \p tensorRank is 5, three offsets each within range [-16, 15] are supported. + The bounding box specified by \p pixelBoxLowerCorner and \p pixelBoxUpperCorner must have non-zero area. + + - \p channelsPerPixel, which specifies the number of elements which must be accessed along C dimension, must be less than or equal to 256. + + - \p pixelsPerColumn, which specifies the number of elements that must be accessed along the {N, D, H, W} dimensions, must be less than or + equal to 1024. + + - \p elementStrides array, which specifies the iteration step along each of the \p tensorRank dimensions, must be non-zero and less + than or equal to 8. Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since + TMA doesn’t support the stride for dimension zero. + When all elements of the \p elementStrides array are one, \p boxDim specifies the number of elements to load. However, if \p elementStrides[i] + is not equal to one for some \p i, then TMA loads ceil( \p boxDim[i] / \p elementStrides[i]) number of elements along i-th dimension. + To load N elements along i-th dimension, \p boxDim[i] must be set to N * \p elementStrides[i]. + + - \p interleave specifies the interleaved layout of type ::CUtensorMapInterleave, which is defined as: + \code +typedef enum CUtensorMapInterleave_enum { +CU_TENSOR_MAP_INTERLEAVE_NONE = 0, +CU_TENSOR_MAP_INTERLEAVE_16B, +CU_TENSOR_MAP_INTERLEAVE_32B +} CUtensorMapInterleave; + \endcode + TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 byte per channel or NC/16HWC16 where C16 + uses 32 bytes. + When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE and \p swizzle is not ::CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension + (computed as \p boxDim[0] multiplied by element size derived from \p tensorDataType) must be less than or equal to the swizzle size. + - CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32. + - CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64. + - CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128. 
+ + - \p swizzle, which specifies the shared memory bank swizzling pattern, has to be of type ::CUtensorMapSwizzle which is defined as: + \code +typedef enum CUtensorMapSwizzle_enum { +CU_TENSOR_MAP_SWIZZLE_NONE = 0, +CU_TENSOR_MAP_SWIZZLE_32B, +CU_TENSOR_MAP_SWIZZLE_64B, +CU_TENSOR_MAP_SWIZZLE_128B +} CUtensorMapSwizzle; + \endcode + Data are organized in a specific order in global memory; however, this may not match the order in which the application accesses data + in shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this + problem, data can be loaded to shared memory with shuffling across shared memory banks. + When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B, \p swizzle must be ::CU_TENSOR_MAP_SWIZZLE_32B. + Other interleave modes can have any swizzling pattern. + + - \p l2Promotion specifies L2 fetch size which indicates the byte granularity at which L2 requests are filled from DRAM. It must be of + type ::CUtensorMapL2promotion, which is defined as: + \code +typedef enum CUtensorMapL2promotion_enum { +CU_TENSOR_MAP_L2_PROMOTION_NONE = 0, +CU_TENSOR_MAP_L2_PROMOTION_L2_64B, +CU_TENSOR_MAP_L2_PROMOTION_L2_128B, +CU_TENSOR_MAP_L2_PROMOTION_L2_256B +} CUtensorMapL2promotion; + \endcode + + - \p oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type + ::CUtensorMapFloatOOBfill which is defined as: + \code +typedef enum CUtensorMapFloatOOBfill_enum { +CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0, +CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA +} CUtensorMapFloatOOBfill; + \endcode + Note that ::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when \p tensorDataType represents a floating-point data type. 
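+
+ As an illustrative sketch only (editorial addition, not part of the upstream CUDA
+ documentation; the dimensions below are hypothetical), the \p globalStrides requirement
+ above follows directly from the stride formula when there is no padding:
+ \code
+/* 3-D CU_TENSOR_MAP_DATA_TYPE_FLOAT32 tensor (4 bytes per element), hypothetical sizes */
+cuuint64_t globalDim[3] = {64, 32, 8};
+cuuint64_t globalStrides[2];
+globalStrides[0] = globalDim[0] * 4;                 /* 256 bytes, a multiple of 16 */
+globalStrides[1] = globalStrides[0] * globalDim[1];  /* 8192 bytes */
+ \endcode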
+ + \param tensorMap - Tensor map object to create + \param tensorDataType - Tensor data type + \param tensorRank - Dimensionality of tensor; must be at least 3 + \param globalAddress - Starting address of memory region described by tensor + \param globalDim - Array containing tensor size (number of elements) along each of the \p tensorRank dimensions + \param globalStrides - Array containing stride size (in bytes) along each of the \p tensorRank - 1 dimensions + \param pixelBoxLowerCorner - Array containing DHW dimensions of lower box corner + \param pixelBoxUpperCorner - Array containing DHW dimensions of upper box corner + \param channelsPerPixel - Number of channels per pixel + \param pixelsPerColumn - Number of pixels per column + \param elementStrides - Array containing traversal stride in each of the \p tensorRank dimensions + \param interleave - Type of interleaved layout the tensor addresses + \param swizzle - Bank swizzling pattern inside shared memory + \param l2Promotion - L2 promotion size + \param oobFill - Indicate whether zero or special NaN constant will be used to fill out-of-bound elements + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTensorMapEncodeTiled, + ::cuTensorMapReplaceAddress*/ + fn cuTensorMapEncodeIm2col( + tensorMap: *mut cuda_types::CUtensorMap, + tensorDataType: cuda_types::CUtensorMapDataType, + tensorRank: cuda_types::cuuint32_t, + globalAddress: *mut ::core::ffi::c_void, + globalDim: *const cuda_types::cuuint64_t, + globalStrides: *const cuda_types::cuuint64_t, + pixelBoxLowerCorner: *const ::core::ffi::c_int, + pixelBoxUpperCorner: *const ::core::ffi::c_int, + channelsPerPixel: cuda_types::cuuint32_t, + pixelsPerColumn: cuda_types::cuuint32_t, + elementStrides: *const cuda_types::cuuint32_t, + interleave: cuda_types::CUtensorMapInterleave, + swizzle: cuda_types::CUtensorMapSwizzle, + l2Promotion: cuda_types::CUtensorMapL2promotion, + oobFill: cuda_types::CUtensorMapFloatOOBfill, + ) -> cuda_types::CUresult; + /** \brief Modify an existing tensor map descriptor with an updated global address + + Modifies the descriptor for Tensor Memory Access (TMA) object passed in \p tensorMap with + an updated \p globalAddress. + + Tensor map objects are only supported on devices of compute capability 9.0 or higher. + Additionally, a tensor map object is an opaque value, and, as such, should only be + accessed through CUDA API calls. + + \param tensorMap - Tensor map object to modify + \param globalAddress - Starting address of memory region described by tensor, must follow previous alignment requirements + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuTensorMapEncodeTiled, + ::cuTensorMapEncodeIm2col*/ + fn cuTensorMapReplaceAddress( + tensorMap: *mut cuda_types::CUtensorMap, + globalAddress: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + /** \brief Queries if a device may directly access a peer device's memory. + + Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of + directly accessing memory from contexts on \p peerDev and 0 otherwise. + If direct access of \p peerDev from \p dev is possible, then access may be + enabled on two specific contexts by calling ::cuCtxEnablePeerAccess(). 
+ + \param canAccessPeer - Returned access capability + \param dev - Device from which allocations on \p peerDev are to + be directly accessed. + \param peerDev - Device on which the allocations to be directly accessed + by \p dev reside. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE + \notefnerr + + \sa + ::cuCtxEnablePeerAccess, + ::cuCtxDisablePeerAccess, + ::cudaDeviceCanAccessPeer*/ + fn cuDeviceCanAccessPeer( + canAccessPeer: *mut ::core::ffi::c_int, + dev: cuda_types::CUdevice, + peerDev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Enables direct access to memory allocations in a peer context. + + If both the current context and \p peerContext are on devices which support unified + addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and same + major compute capability, then on success all allocations from \p peerContext will + immediately be accessible by the current context. See \ref CUDA_UNIFIED for additional + details. + + Note that access granted by this call is unidirectional and that in order to access + memory from the current context in \p peerContext, a separate symmetric call + to ::cuCtxEnablePeerAccess() is required. + + Note that there are both device-wide and system-wide limitations per system + configuration, as noted in the CUDA Programming Guide under the section + "Peer-to-Peer Memory Access". + + Returns ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if ::cuDeviceCanAccessPeer() indicates + that the ::CUdevice of the current context cannot directly access memory + from the ::CUdevice of \p peerContext. + + Returns ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of + \p peerContext from the current context has already been enabled. + + Returns ::CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible + because hardware resources required for peer access have been exhausted. + + Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, \p peerContext + is not a valid context, or if the current context is \p peerContext. + + Returns ::CUDA_ERROR_INVALID_VALUE if \p Flags is not 0. + + \param peerContext - Peer context to enable direct access to from the current context + \param Flags - Reserved for future use and must be set to 0 + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, + ::CUDA_ERROR_TOO_MANY_PEERS, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuDeviceCanAccessPeer, + ::cuCtxDisablePeerAccess, + ::cudaDeviceEnablePeerAccess*/ + fn cuCtxEnablePeerAccess( + peerContext: cuda_types::CUcontext, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Disables direct access to memory allocations in a peer context and + unregisters any registered allocations. + +Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has + not yet been enabled from \p peerContext to the current context. + + Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if + \p peerContext is not a valid context. 
+ + \param peerContext - Peer context to disable direct access to + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED, + ::CUDA_ERROR_INVALID_CONTEXT, + \notefnerr + + \sa + ::cuDeviceCanAccessPeer, + ::cuCtxEnablePeerAccess, + ::cudaDeviceDisablePeerAccess*/ + fn cuCtxDisablePeerAccess( + peerContext: cuda_types::CUcontext, + ) -> cuda_types::CUresult; + /** \brief Queries attributes of the link between two devices. + + Returns in \p *value the value of the requested attribute \p attrib of the + link between \p srcDevice and \p dstDevice. The supported attributes are: + - ::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: A relative value indicating the + performance of the link between two devices. + - ::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED P2P: 1 if P2P Access is enable. + - ::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: 1 if Atomic operations over + the link are supported. + - ::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: 1 if cudaArray can + be accessed over the link. + + Returns ::CUDA_ERROR_INVALID_DEVICE if \p srcDevice or \p dstDevice are not valid + or if they represent the same device. + + Returns ::CUDA_ERROR_INVALID_VALUE if \p attrib is not valid or if \p value is + a null pointer. + + \param value - Returned value of the requested attribute + \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice. + \param srcDevice - The source device of the target link. + \param dstDevice - The destination device of the target link. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa + ::cuCtxEnablePeerAccess, + ::cuCtxDisablePeerAccess, + ::cuDeviceCanAccessPeer, + ::cudaDeviceGetP2PAttribute*/ + fn cuDeviceGetP2PAttribute( + value: *mut ::core::ffi::c_int, + attrib: cuda_types::CUdevice_P2PAttribute, + srcDevice: cuda_types::CUdevice, + dstDevice: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Unregisters a graphics resource for access by CUDA + + Unregisters the graphics resource \p resource so it is not accessible by + CUDA unless registered again. + + If \p resource is invalid then ::CUDA_ERROR_INVALID_HANDLE is + returned. + + \param resource - Resource to unregister + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa + ::cuGraphicsD3D9RegisterResource, + ::cuGraphicsD3D10RegisterResource, + ::cuGraphicsD3D11RegisterResource, + ::cuGraphicsGLRegisterBuffer, + ::cuGraphicsGLRegisterImage, + ::cudaGraphicsUnregisterResource*/ + fn cuGraphicsUnregisterResource( + resource: cuda_types::CUgraphicsResource, + ) -> cuda_types::CUresult; + /** \brief Get an array through which to access a subresource of a mapped graphics resource. + + Returns in \p *pArray an array through which the subresource of the mapped + graphics resource \p resource which corresponds to array index \p arrayIndex + and mipmap level \p mipLevel may be accessed. The value set in \p *pArray may + change every time that \p resource is mapped. + + If \p resource is not a texture then it cannot be accessed via an array and + ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. + If \p arrayIndex is not a valid array index for \p resource then + ::CUDA_ERROR_INVALID_VALUE is returned. 
+ If \p mipLevel is not a valid mipmap level for \p resource then + ::CUDA_ERROR_INVALID_VALUE is returned. + If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. + + \param pArray - Returned array through which a subresource of \p resource may be accessed + \param resource - Mapped resource to access + \param arrayIndex - Array index for array textures or cubemap face + index as defined by ::CUarray_cubemap_face for + cubemap textures for the subresource to access + \param mipLevel - Mipmap level for the subresource to access + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_MAPPED, + ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY + \notefnerr + + \sa + ::cuGraphicsResourceGetMappedPointer, + ::cudaGraphicsSubResourceGetMappedArray*/ + fn cuGraphicsSubResourceGetMappedArray( + pArray: *mut cuda_types::CUarray, + resource: cuda_types::CUgraphicsResource, + arrayIndex: ::core::ffi::c_uint, + mipLevel: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Get a mipmapped array through which to access a mapped graphics resource. + + Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics + resource \p resource. The value set in \p *pMipmappedArray may change every time + that \p resource is mapped. + + If \p resource is not a texture then it cannot be accessed via a mipmapped array and + ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. + If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. + + \param pMipmappedArray - Returned mipmapped array through which \p resource may be accessed + \param resource - Mapped resource to access + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_MAPPED, + ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY + \notefnerr + + \sa + ::cuGraphicsResourceGetMappedPointer, + ::cudaGraphicsResourceGetMappedMipmappedArray*/ + fn cuGraphicsResourceGetMappedMipmappedArray( + pMipmappedArray: *mut cuda_types::CUmipmappedArray, + resource: cuda_types::CUgraphicsResource, + ) -> cuda_types::CUresult; + /** \brief Get a device pointer through which to access a mapped graphics resource. + + Returns in \p *pDevPtr a pointer through which the mapped graphics resource + \p resource may be accessed. + Returns in \p pSize the size of the memory in bytes which may be accessed from that pointer. + The value set in \p pPointer may change every time that \p resource is mapped. + + If \p resource is not a buffer then it cannot be accessed via a pointer and + ::CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned. + If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. 
+ * + \param pDevPtr - Returned pointer through which \p resource may be accessed + \param pSize - Returned size of the buffer accessible starting at \p *pPointer + \param resource - Mapped resource to access + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_MAPPED, + ::CUDA_ERROR_NOT_MAPPED_AS_POINTER + \notefnerr + + \sa + ::cuGraphicsMapResources, + ::cuGraphicsSubResourceGetMappedArray, + ::cudaGraphicsResourceGetMappedPointer*/ + fn cuGraphicsResourceGetMappedPointer_v2( + pDevPtr: *mut cuda_types::CUdeviceptr, + pSize: *mut usize, + resource: cuda_types::CUgraphicsResource, + ) -> cuda_types::CUresult; + /** \brief Set usage flags for mapping a graphics resource + + Set \p flags for mapping the graphics resource \p resource. + + Changes to \p flags will take effect the next time \p resource is mapped. + The \p flags argument may be any of the following: + + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA kernels. This is the default value. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that CUDA kernels which + access this resource will not write to this resource. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies that CUDA kernels + which access this resource will not read from this resource and will + write over the entire contents of the resource, so none of the data + previously stored in the resource will be preserved. + + If \p resource is presently mapped for access by CUDA then + ::CUDA_ERROR_ALREADY_MAPPED is returned. + If \p flags is not one of the above values then ::CUDA_ERROR_INVALID_VALUE is returned. + + \param resource - Registered resource to set flags for + \param flags - Parameters for resource mapping + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED + \notefnerr + + \sa + ::cuGraphicsMapResources, + ::cudaGraphicsResourceSetMapFlags*/ + fn cuGraphicsResourceSetMapFlags_v2( + resource: cuda_types::CUgraphicsResource, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Map graphics resources for access by CUDA + + Maps the \p count graphics resources in \p resources for access by CUDA. + + The resources in \p resources may be accessed by CUDA until they + are unmapped. The graphics API from which \p resources were registered + should not access any resources while they are mapped by CUDA. If an + application does so, the results are undefined. + + This function provides the synchronization guarantee that any graphics calls + issued before ::cuGraphicsMapResources() will complete before any subsequent CUDA + work issued in \p stream begins. + + If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. + If any of \p resources are presently mapped for access by CUDA then ::CUDA_ERROR_ALREADY_MAPPED is returned. 
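+
+ Illustrative sketch (editorial addition, not part of the upstream CUDA documentation):
+ mapping a single previously registered buffer, obtaining its device pointer, and
+ unmapping it again, with error handling omitted:
+ \code
+CUdeviceptr ptr;
+size_t size;
+cuGraphicsMapResources(1, &resource, hStream);
+cuGraphicsResourceGetMappedPointer(&ptr, &size, resource);
+/* ... use ptr in work submitted to hStream ... */
+cuGraphicsUnmapResources(1, &resource, hStream);
+ \endcode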
+ + \param count - Number of resources to map + \param resources - Resources to map for CUDA usage + \param hStream - Stream with which to synchronize + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_UNKNOWN + \note_null_stream + \notefnerr + + \sa + ::cuGraphicsResourceGetMappedPointer, + ::cuGraphicsSubResourceGetMappedArray, + ::cuGraphicsUnmapResources, + ::cudaGraphicsMapResources*/ + fn cuGraphicsMapResources_ptsz( + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Unmap graphics resources. + + Unmaps the \p count graphics resources in \p resources. + + Once unmapped, the resources in \p resources may not be accessed by CUDA + until they are mapped again. + + This function provides the synchronization guarantee that any CUDA work issued + in \p stream before ::cuGraphicsUnmapResources() will complete before any + subsequently issued graphics work begins. + + + If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. + If any of \p resources are not presently mapped for access by CUDA then ::CUDA_ERROR_NOT_MAPPED is returned. + + \param count - Number of resources to unmap + \param resources - Resources to unmap + \param hStream - Stream with which to synchronize + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_MAPPED, + ::CUDA_ERROR_UNKNOWN + \note_null_stream + \notefnerr + + \sa + ::cuGraphicsMapResources, + ::cudaGraphicsUnmapResources*/ + fn cuGraphicsUnmapResources_ptsz( + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Returns the requested driver API function pointer + + Returns in \p **pfn the address of the CUDA driver function for the requested + CUDA version and flags. + + The CUDA version is specified as (1000 * major + 10 * minor), so CUDA 11.2 + should be specified as 11020. For a requested driver symbol, if the specified + CUDA version is greater than or equal to the CUDA version in which the driver symbol + was introduced, this API will return the function pointer to the corresponding + versioned function. + + The pointer returned by the API should be cast to a function pointer matching the + requested driver function's definition in the API header file. The function pointer + typedef can be picked up from the corresponding typedefs header file. For example, + cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h. + + The API will return ::CUDA_SUCCESS and set the returned \p pfn to NULL if the + requested driver function is not supported on the platform, no ABI + compatible driver function exists for the specified \p cudaVersion or if the + driver symbol is invalid. 
+ + It will also set the optional \p symbolStatus to one of the values in + ::CUdriverProcAddressQueryResult with the following meanings: + - ::CU_GET_PROC_ADDRESS_SUCCESS - The requested symbol was succesfully found based + on input arguments and \p pfn is valid + - ::CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND - The requested symbol was not found + - ::CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT - The requested symbol was found but is + not supported by cudaVersion specified + + The requested flags can be: + - ::CU_GET_PROC_ADDRESS_DEFAULT: This is the default mode. This is equivalent to + ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM if the code is compiled with + --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM + is defined; ::CU_GET_PROC_ADDRESS_LEGACY_STREAM otherwise. + - ::CU_GET_PROC_ADDRESS_LEGACY_STREAM: This will enable the search for all driver symbols + that match the requested driver symbol name except the corresponding per-thread versions. + - ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: This will enable the search for all + driver symbols that match the requested driver symbol name including the per-thread + versions. If a per-thread version is not found, the API will return the legacy version + of the driver function. + + \param symbol - The base name of the driver API function to look for. As an example, + for the driver API ::cuMemAlloc_v2, \p symbol would be cuMemAlloc and + \p cudaVersion would be the ABI compatible CUDA version for the _v2 variant. + \param pfn - Location to return the function pointer to the requested driver function + \param cudaVersion - The CUDA version to look for the requested driver symbol + \param flags - Flags to specify search options. + \param symbolStatus - Optional location to store the status of the search for + \p symbol based on \p cudaVersion. See ::CUdriverProcAddressQueryResult + for possible values. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED + \note_version_mixing + + \sa + ::cudaGetDriverEntryPoint*/ + fn cuGetProcAddress_v2( + symbol: *const ::core::ffi::c_char, + pfn: *mut *mut ::core::ffi::c_void, + cudaVersion: ::core::ffi::c_int, + flags: cuda_types::cuuint64_t, + symbolStatus: *mut cuda_types::CUdriverProcAddressQueryResult, + ) -> cuda_types::CUresult; + /** \brief Allows caller to fetch a coredump attribute value for the current context + + Returns in \p *value the requested value specified by \p attrib. It is up to the caller + to ensure that the data type and size of \p *value matches the request. + + If the caller calls this function with \p *value equal to NULL, the size of the memory + region (in bytes) expected for \p attrib will be placed in \p size. + + The supported attributes are: + - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + The default value is ::false unless set to ::true globally or locally, or the + CU_CTX_USER_COREDUMP_ENABLE flag was set during context creation. + - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + also create a coredump. The default value is ::true unless set to ::false globally or + or locally. + - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + will not have a dump of GPU memory or non-reloc ELF images. The default value is + ::false unless set to ::true globally or locally. 
+ - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be + created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default + value is ::false unless set to ::true globally or locally. + - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where + any coredumps generated by this context will be written. The default value is + ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + the CUDA applications and ::PID is the process ID of the CUDA application. + - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe + that will be monitored if user-triggered coredumps are enabled. The default value is + ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + the CUDA application and ::PID is the process ID of the CUDA application. + + \param attrib - The enum defining which value to fetch. + \param value - void* containing the requested data. + \param size - The size of the memory region \p value points to. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + + \sa + ::cuCoredumpGetAttributeGlobal, + ::cuCoredumpSetAttribute, + ::cuCoredumpSetAttributeGlobal*/ + fn cuCoredumpGetAttribute( + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Allows caller to fetch a coredump attribute value for the entire application + + Returns in \p *value the requested value specified by \p attrib. It is up to the caller + to ensure that the data type and size of \p *value matches the request. + + If the caller calls this function with \p *value equal to NULL, the size of the memory + region (in bytes) expected for \p attrib will be placed in \p size. + + The supported attributes are: + - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + The default value is ::false. + - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + also create a coredump. The default value is ::true. + - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + will not have a dump of GPU memory or non-reloc ELF images. The default value is + ::false. + - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be + created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default + value is ::false. + - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where + any coredumps generated by this context will be written. The default value is + ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + the CUDA applications and ::PID is the process ID of the CUDA application. + - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe + that will be monitored if user-triggered coredumps are enabled. The default value is + ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + the CUDA application and ::PID is the process ID of the CUDA application. + + \param attrib - The enum defining which value to fetch. + \param value - void* containing the requested data. 
+ \param size - The size of the memory region \p value points to. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuCoredumpGetAttribute, + ::cuCoredumpSetAttribute, + ::cuCoredumpSetAttributeGlobal*/ + fn cuCoredumpGetAttributeGlobal( + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Allows caller to set a coredump attribute value for the current context + + This function should be considered an alternate interface to the CUDA-GDB environment + variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump + + An important design decision to note is that any coredump environment variable values + set before CUDA initializes will take permanent precedence over any values set with this + this function. This decision was made to ensure no change in behavior for any users that + may be currently using these variables to get coredumps. + + \p *value shall contain the requested value specified by \p set. It is up to the caller + to ensure that the data type and size of \p *value matches the request. + + If the caller calls this function with \p *value equal to NULL, the size of the memory + region (in bytes) expected for \p set will be placed in \p size. + + /note This function will return ::CUDA_ERROR_NOT_SUPPORTED if the caller attempts to set + ::CU_COREDUMP_ENABLE_ON_EXCEPTION on a GPU of with Compute Capability < 6.0. ::cuCoredumpSetAttributeGlobal + works on those platforms as an alternative. + + /note ::CU_COREDUMP_ENABLE_USER_TRIGGER and ::CU_COREDUMP_PIPE cannot be set on a per-context basis. + + The supported attributes are: + - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + The default value is ::false. + - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + also create a coredump. The default value is ::true. + - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + will not have a dump of GPU memory or non-reloc ELF images. The default value is + ::false. + - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where + any coredumps generated by this context will be written. The default value is + ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + the CUDA applications and ::PID is the process ID of the CUDA application. + + \param attrib - The enum defining which value to set. + \param value - void* containing the requested data. + \param size - The size of the memory region \p value points to. 
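+
+ Illustrative sketch (editorial addition, not part of the upstream CUDA documentation;
+ assumes a C99 bool and omits error handling): enabling GPU coredumps on exception for
+ the current context:
+ \code
+bool enableOnException = true;
+size_t size = sizeof(enableOnException);
+cuCoredumpSetAttribute(CU_COREDUMP_ENABLE_ON_EXCEPTION, &enableOnException, &size);
+ \endcode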
+ + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_PERMITTED, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED, + ::CUDA_ERROR_NOT_SUPPORTED + + \sa + ::cuCoredumpGetAttributeGlobal, + ::cuCoredumpGetAttribute, + ::cuCoredumpSetAttributeGlobal*/ + fn cuCoredumpSetAttribute( + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, + ) -> cuda_types::CUresult; + /** \brief Allows caller to set a coredump attribute value globally + + This function should be considered an alternate interface to the CUDA-GDB environment + variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump + + An important design decision to note is that any coredump environment variable values + set before CUDA initializes will take permanent precedence over any values set with this + this function. This decision was made to ensure no change in behavior for any users that + may be currently using these variables to get coredumps. + + \p *value shall contain the requested value specified by \p set. It is up to the caller + to ensure that the data type and size of \p *value matches the request. + + If the caller calls this function with \p *value equal to NULL, the size of the memory + region (in bytes) expected for \p set will be placed in \p size. + + The supported attributes are: + - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from + this context will create a coredump at the location specified by ::CU_COREDUMP_FILE. + The default value is ::false. + - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will + also create a coredump. The default value is ::true. + - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps + will not have a dump of GPU memory or non-reloc ELF images. The default value is + ::false. + - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be + created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default + value is ::false. + - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where + any coredumps generated by this context will be written. The default value is + ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running + the CUDA applications and ::PID is the process ID of the CUDA application. + - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe + that will be monitored if user-triggered coredumps are enabled. This value may not be + changed after ::CU_COREDUMP_ENABLE_USER_TRIGGER is set to ::true. The default + value is ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine + running the CUDA application and ::PID is the process ID of the CUDA application. + + \param attrib - The enum defining which value to set. + \param value - void* containing the requested data. + \param size - The size of the memory region \p value points to. 
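+
+ Illustrative sketch (editorial addition, not part of the upstream CUDA documentation;
+ the path is hypothetical and error handling is omitted): redirecting all coredumps for
+ the application to a custom location:
+ \code
+const char *path = "/tmp/app_gpu_core";   /* hypothetical location */
+size_t size = strlen(path) + 1;
+cuCoredumpSetAttributeGlobal(CU_COREDUMP_FILE, (void *)path, &size);
+ \endcode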
+ + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_PERMITTED + + \sa + ::cuCoredumpGetAttribute, + ::cuCoredumpGetAttributeGlobal, + ::cuCoredumpSetAttribute*/ + fn cuCoredumpSetAttributeGlobal( + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, + ) -> cuda_types::CUresult; + /// @} + fn cuGetExportTable( + ppExportTable: *mut *const ::core::ffi::c_void, + pExportTableId: *const cuda_types::CUuuid, + ) -> cuda_types::CUresult; + /** \brief Creates a green context with a specified set of resources. + + This API creates a green context with the resources specified in the descriptor \p desc and + returns it in the handle represented by \p phCtx. This API will retain the primary context on device \p dev, + which will is released when the green context is destroyed. It is advised to have the primary context active + before calling this API to avoid the heavy cost of triggering primary context initialization and + deinitialization multiple times. + + The API does not set the green context current. In order to set it current, you need to explicitly set it current + by first converting the green context to a CUcontext using ::cuCtxFromGreenCtx and subsequently calling + ::cuCtxSetCurrent / ::cuCtxPushCurrent. It should be noted that a green context can be current to only one + thread at a time. There is no internal synchronization to make API calls accessing the same green context + from multiple threads work. + + Note: The API is not supported on 32-bit platforms. + + \param phCtx - Pointer for the output handle to the green context + \param desc - Descriptor generated via ::cuDevResourceGenerateDesc which contains the set of resources to be used + \param dev - Device on which to create the green context. + \param flags - One of the supported green context creation flags. \p CU_GREEN_CTX_DEFAULT_STREAM is required. + + The supported flags are: + - \p CU_GREEN_CTX_DEFAULT_STREAM : Creates a default stream to use inside the green context. Required. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_NOT_SUPPORTED, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa + ::cuGreenCtxDestroy, + ::cuCtxFromGreenCtx, + ::cuCtxSetCurrent, + ::cuCtxPushCurrent, + ::cuDevResourceGenerateDesc, + ::cuDevicePrimaryCtxRetain, + ::cuCtxCreate, + ::cuCtxCreate_v3*/ + fn cuGreenCtxCreate( + phCtx: *mut cuda_types::CUgreenCtx, + desc: cuda_types::CUdevResourceDesc, + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Destroys a green context + + Destroys the green context, releasing the primary context of the device that this green context was created for. + Any resources provisioned for this green context (that were initially available via the resource descriptor) + are released as well. + \param hCtx - Green context to be destroyed + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_CONTEXT_IS_DESTROYED + + \sa + ::cuGreenCtxCreate, + ::cuCtxDestroy*/ + fn cuGreenCtxDestroy(hCtx: cuda_types::CUgreenCtx) -> cuda_types::CUresult; + /** \brief Converts a green context into the primary context + + The API converts a green context into the primary context returned in \p pContext. 
It is important + to note that the converted context \p pContext is a normal primary context but with + the resources of the specified green context \p hCtx. Once converted, it can then + be used to set the context current with ::cuCtxSetCurrent or with any of the CUDA APIs + that accept a CUcontext parameter. + + Users are expected to call this API before calling any CUDA APIs that accept a + CUcontext. Failing to do so will result in the APIs returning ::CUDA_ERROR_INVALID_CONTEXT. + + \param pContext Returned primary context with green context resources + \param hCtx Green context to convert + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuGreenCtxCreate*/ + fn cuCtxFromGreenCtx( + pContext: *mut cuda_types::CUcontext, + hCtx: cuda_types::CUgreenCtx, + ) -> cuda_types::CUresult; + /** \brief Get device resources + + Get the \p type resources available to the \p device. + This may often be the starting point for further partitioning or configuring of resources. + + Note: The API is not supported on 32-bit platforms. + + \param device - Device to get resource for + \param resource - Output pointer to a CUdevResource structure + \param type - Type of resource to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_RESOURCE_TYPE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_DEVICE + + \sa + ::cuDevResourceGenerateDesc*/ + fn cuDeviceGetDevResource( + device: cuda_types::CUdevice, + resource: *mut cuda_types::CUdevResource, + type_: cuda_types::CUdevResourceType, + ) -> cuda_types::CUresult; + /** \brief Get context resources + + Get the \p type resources available to the context represented by \p hCtx + \param hCtx - Context to get resource for + + Note: The API is not supported on 32-bit platforms. + + \param resource - Output pointer to a CUdevResource structure + \param type - Type of resource to retrieve + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_RESOURCE_TYPE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_CONTEXT + + \sa + ::cuDevResourceGenerateDesc*/ + fn cuCtxGetDevResource( + hCtx: cuda_types::CUcontext, + resource: *mut cuda_types::CUdevResource, + type_: cuda_types::CUdevResourceType, + ) -> cuda_types::CUresult; + /** \brief Get green context resources + + Get the \p type resources available to the green context represented by \p hCtx + \param hCtx - Green context to get resource for + \param resource - Output pointer to a CUdevResource structure + \param type - Type of resource to retrieve + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_RESOURCE_TYPE, + ::CUDA_ERROR_INVALID_VALUE + + \sa + ::cuDevResourceGenerateDesc*/ + fn cuGreenCtxGetDevResource( + hCtx: cuda_types::CUgreenCtx, + resource: *mut cuda_types::CUdevResource, + type_: cuda_types::CUdevResourceType, + ) -> cuda_types::CUresult; + /** \brief Splits \p CU_DEV_RESOURCE_TYPE_SM resources. + + Splits \p CU_DEV_RESOURCE_TYPE_SM resources into \p nbGroups, adhering to the minimum SM count specified in \p minCount + and the usage flags in \p useFlags. If \p result is NULL, the API simulates a split and provides the amount of groups that + would be created in \p nbGroups. 
Otherwise, \p nbGroups must point to the amount of elements in \p result and on return, + the API will overwrite \p nbGroups with the amount actually created. The groups are written to the array in \p result. + \p nbGroups can be less than the total amount if a smaller number of groups is needed. + + This API is used to spatially partition the input resource. The input resource needs to come from one of + ::cuDeviceGetDevResource, ::cuCtxGetDevResource, or ::cuGreenCtxGetDevResource. + A limitation of the API is that the output results cannot be split again without + first creating a descriptor and a green context with that descriptor. + + When creating the groups, the API will take into account the performance and functional characteristics of the + input resource, and guarantee a split that will create a disjoint set of symmetrical partitions. This may lead to less groups created + than purely dividing the total SM count by the \p minCount due to cluster requirements or + alignment and granularity requirements for the minCount. + + The \p remainder set, might not have the same functional or performance guarantees as the groups in \p result. + Its use should be carefully planned and future partitions of the \p remainder set are discouraged. + + A successful API call must either have: + - A valid array of \p result pointers of size passed in \p nbGroups, with \p Input of type \p CU_DEV_RESOURCE_TYPE_SM. + Value of \p minCount must be between 0 and the SM count specified in \p input. \p remaining and \p useFlags are optional. + - NULL passed in for \p result, with a valid integer pointer in \p nbGroups and \p Input of type \p CU_DEV_RESOURCE_TYPE_SM. + Value of \p minCount must be between 0 and the SM count specified in \p input. + This queries the number of groups that would be created by the API. + + Note: The API is not supported on 32-bit platforms. + + \param result - Output array of \p CUdevResource resources. Can be NULL to query the number of groups. + \param nbGroups - This is a pointer, specifying the number of groups that would be or should be created as described below. + \param input - Input SM resource to be split. Must be a valid \p CU_DEV_RESOURCE_TYPE_SM resource. + \param remaining - If the input resource cannot be cleanly split among \p nbGroups, the remaining is placed in here. + Can be ommitted (NULL) if the user does not need the remaining set. + \param useFlags - Flags specifying how these partitions are used or which constraints to abide by when splitting the input. + \param minCount - Minimum number of SMs required + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_RESOURCE_TYPE, + ::CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION + + \sa + ::cuGreenCtxGetDevResource, + ::cuCtxGetDevResource, + ::cuDeviceGetDevResource*/ + fn cuDevSmResourceSplitByCount( + result: *mut cuda_types::CUdevResource, + nbGroups: *mut ::core::ffi::c_uint, + input: *const cuda_types::CUdevResource, + remaining: *mut cuda_types::CUdevResource, + useFlags: ::core::ffi::c_uint, + minCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Generate a resource descriptor + + Generates a resource descriptor with the set of resources specified in \p resources. + The generated resource descriptor is necessary for the creation of green contexts via the ::cuGreenCtxCreate API. 
+ The API expects \p nbResources == 1, as there is only one type of resource and merging the same + types of resource is currently not supported. + + Note: The API is not supported on 32-bit platforms. + + \param phDesc - Output descriptor + \param resources - Array of resources to be included in the descriptor + \param nbResources - Number of resources passed in \p resources + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_RESOURCE_TYPE, + ::CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION + + \sa + ::cuDevSmResourceSplitByCount*/ + fn cuDevResourceGenerateDesc( + phDesc: *mut cuda_types::CUdevResourceDesc, + resources: *mut cuda_types::CUdevResource, + nbResources: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Records an event. + + Captures in \phEvent all the activities of the green context of \phCtx + at the time of this call. \phEvent and \phCtx must be from the same + CUDA context. Calls such as ::cuEventQuery() or ::cuGreenCtxWaitEvent() will + then examine or wait for completion of the work that was captured. Uses of + \p hCtx after this call do not modify \p hEvent. + + \note The API will return an error if the specified green context \p hCtx + has a stream in the capture mode. In such a case, the call will invalidate + all the conflicting captures. + + \param hCtx - Green context to record event for + \param hEvent - Event to record + + \return + ::CUDA_SUCCESS + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE + + \sa + ::cuGreenCtxWaitEvent, + ::cuEventRecord*/ + fn cuGreenCtxRecordEvent( + hCtx: cuda_types::CUgreenCtx, + hEvent: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Make a green context wait on an event + + Makes all future work submitted to green context \phCtx wait for all work + captured in \phEvent. The synchronization will be performed on the device + and will not block the calling CPU thread. See ::cuGreenCtxRecordEvent() + for details on what is captured by an event. + + \note The API will return an error and invalidate the capture if the specified + event \p hEvent is part of an ongoing capture sequence. + + \param hCtx - Green context to wait + \param hEvent - Event to wait on (may not be NULL) + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE + + \sa + ::cuGreenCtxRecordEvent, + ::cuStreamWaitEvent*/ + fn cuGreenCtxWaitEvent( + hCtx: cuda_types::CUgreenCtx, + hEvent: cuda_types::CUevent, + ) -> cuda_types::CUresult; + /** \brief Query the green context associated with a stream + + Returns the CUDA green context that the stream is associated with, or NULL if the stream + is not associated with any green context. + + The stream handle \p hStream can refer to any of the following: +
+
+     - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate.
+       If during stream creation the context that was active in the calling thread was obtained
+       with cuCtxFromGreenCtx, that green context is returned in \p phCtx.
+       Otherwise, \p *phCtx is set to NULL instead.
+     - a special stream such as the NULL stream or ::CU_STREAM_LEGACY.
+       In that case, if the context that is active in the calling thread was obtained
+       with cuCtxFromGreenCtx, that green context is returned.
+       Otherwise, \p *phCtx is set to NULL instead.
+
+ Passing an invalid handle will result in undefined behavior. + + \param hStream - Handle to the stream to be queried + \param phCtx - Returned green context associated with the stream + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_HANDLE, + \notefnerr + + \sa ::cuStreamDestroy, + ::cuStreamCreateWithPriority, + ::cuStreamGetPriority, + ::cuStreamGetFlags, + ::cuStreamWaitEvent, + ::cuStreamQuery, + ::cuStreamSynchronize, + ::cuStreamAddCallback, + ::cudaStreamCreate, + ::cudaStreamCreateWithFlags*/ + fn cuStreamGetGreenCtx( + hStream: cuda_types::CUstream, + phCtx: *mut cuda_types::CUgreenCtx, + ) -> cuda_types::CUresult; + fn cuMemHostRegister( + p: *mut ::core::ffi::c_void, + bytesize: usize, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuGraphicsResourceSetMapFlags( + resource: cuda_types::CUgraphicsResource, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuLinkCreate( + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + stateOut: *mut cuda_types::CUlinkState, + ) -> cuda_types::CUresult; + fn cuLinkAddData( + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + data: *mut ::core::ffi::c_void, + size: usize, + name: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + fn cuLinkAddFile( + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + path: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + fn cuTexRefSetAddress2D_v2( + hTexRef: cuda_types::CUtexref, + desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR, + dptr: cuda_types::CUdeviceptr, + Pitch: usize, + ) -> cuda_types::CUresult; + fn cuDeviceTotalMem( + bytes: *mut ::core::ffi::c_uint, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + fn cuCtxCreate( + pctx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + fn cuModuleGetGlobal( + dptr: *mut cuda_types::CUdeviceptr_v1, + bytes: *mut ::core::ffi::c_uint, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, + ) -> cuda_types::CUresult; + fn cuMemGetInfo( + free: *mut ::core::ffi::c_uint, + total: *mut ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemAlloc( + dptr: *mut cuda_types::CUdeviceptr_v1, + bytesize: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemAllocPitch( + dptr: *mut cuda_types::CUdeviceptr_v1, + pPitch: *mut ::core::ffi::c_uint, + WidthInBytes: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, + ElementSizeBytes: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemFree(dptr: cuda_types::CUdeviceptr_v1) -> cuda_types::CUresult; + fn cuMemGetAddressRange( + pbase: *mut cuda_types::CUdeviceptr_v1, + psize: *mut ::core::ffi::c_uint, + dptr: cuda_types::CUdeviceptr_v1, + ) -> cuda_types::CUresult; + fn cuMemAllocHost( + pp: *mut *mut ::core::ffi::c_void, + bytesize: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemHostGetDevicePointer( + pdptr: *mut cuda_types::CUdeviceptr_v1, + p: *mut ::core::ffi::c_void, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyHtoD( + dstDevice: cuda_types::CUdeviceptr_v1, + srcHost: *const 
::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoH( + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoD( + dstDevice: cuda_types::CUdeviceptr_v1, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoA( + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoD( + dstDevice: cuda_types::CUdeviceptr_v1, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyHtoA( + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoH( + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoA( + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyHtoAAsync( + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoHAsync( + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy2D(pCopy: *const cuda_types::CUDA_MEMCPY2D_v1) -> cuda_types::CUresult; + fn cuMemcpy2DUnaligned( + pCopy: *const cuda_types::CUDA_MEMCPY2D_v1, + ) -> cuda_types::CUresult; + fn cuMemcpy3D(pCopy: *const cuda_types::CUDA_MEMCPY3D_v1) -> cuda_types::CUresult; + fn cuMemcpyHtoDAsync( + dstDevice: cuda_types::CUdeviceptr_v1, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoHAsync( + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoDAsync( + dstDevice: cuda_types::CUdeviceptr_v1, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy2DAsync( + pCopy: *const cuda_types::CUDA_MEMCPY2D_v1, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy3DAsync( + pCopy: *const cuda_types::CUDA_MEMCPY3D_v1, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD8( + dstDevice: cuda_types::CUdeviceptr_v1, + uc: ::core::ffi::c_uchar, + N: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemsetD16( + dstDevice: cuda_types::CUdeviceptr_v1, + us: ::core::ffi::c_ushort, + N: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemsetD32( + dstDevice: cuda_types::CUdeviceptr_v1, + ui: ::core::ffi::c_uint, + N: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemsetD2D8( + dstDevice: cuda_types::CUdeviceptr_v1, + dstPitch: ::core::ffi::c_uint, + uc: ::core::ffi::c_uchar, + Width: ::core::ffi::c_uint, + Height: 
::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemsetD2D16( + dstDevice: cuda_types::CUdeviceptr_v1, + dstPitch: ::core::ffi::c_uint, + us: ::core::ffi::c_ushort, + Width: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemsetD2D32( + dstDevice: cuda_types::CUdeviceptr_v1, + dstPitch: ::core::ffi::c_uint, + ui: ::core::ffi::c_uint, + Width: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuArrayCreate( + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1, + ) -> cuda_types::CUresult; + fn cuArrayGetDescriptor( + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR_v1, + hArray: cuda_types::CUarray, + ) -> cuda_types::CUresult; + fn cuArray3DCreate( + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1, + ) -> cuda_types::CUresult; + fn cuArray3DGetDescriptor( + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1, + hArray: cuda_types::CUarray, + ) -> cuda_types::CUresult; + fn cuTexRefSetAddress( + ByteOffset: *mut ::core::ffi::c_uint, + hTexRef: cuda_types::CUtexref, + dptr: cuda_types::CUdeviceptr_v1, + bytes: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuTexRefSetAddress2D( + hTexRef: cuda_types::CUtexref, + desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1, + dptr: cuda_types::CUdeviceptr_v1, + Pitch: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuTexRefGetAddress( + pdptr: *mut cuda_types::CUdeviceptr_v1, + hTexRef: cuda_types::CUtexref, + ) -> cuda_types::CUresult; + fn cuGraphicsResourceGetMappedPointer( + pDevPtr: *mut cuda_types::CUdeviceptr_v1, + pSize: *mut ::core::ffi::c_uint, + resource: cuda_types::CUgraphicsResource, + ) -> cuda_types::CUresult; + fn cuCtxDestroy(ctx: cuda_types::CUcontext) -> cuda_types::CUresult; + fn cuCtxPopCurrent(pctx: *mut cuda_types::CUcontext) -> cuda_types::CUresult; + fn cuCtxPushCurrent(ctx: cuda_types::CUcontext) -> cuda_types::CUresult; + fn cuStreamDestroy(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + fn cuEventDestroy(hEvent: cuda_types::CUevent) -> cuda_types::CUresult; + fn cuDevicePrimaryCtxRelease(dev: cuda_types::CUdevice) -> cuda_types::CUresult; + fn cuDevicePrimaryCtxReset(dev: cuda_types::CUdevice) -> cuda_types::CUresult; + fn cuDevicePrimaryCtxSetFlags( + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemcpyHtoD_v2( + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoH_v2( + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoD_v2( + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoA_v2( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoD_v2( + dstDevice: cuda_types::CUdeviceptr, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyHtoA_v2( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoH_v2( + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + ) -> cuda_types::CUresult; 
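+    // Editorial sketch (not part of the bindgen output): the green-context entry points
+    // documented earlier in this hunk (cuDeviceGetDevResource, cuDevSmResourceSplitByCount,
+    // cuDevResourceGenerateDesc, cuGreenCtxCreate, cuCtxFromGreenCtx) compose into a
+    // query -> split -> describe -> create -> bind workflow. The outline below is only an
+    // illustration: `dev`, `min_sm_count` and the zero-initialized out-parameters are
+    // hypothetical, unsafety and error handling are elided, and cuCtxSetCurrent plus the
+    // CU_GREEN_CTX_DEFAULT_STREAM and CU_DEV_RESOURCE_TYPE_SM constants are assumed to be
+    // available from cuda_types / elsewhere in these bindings.
+    //
+    //     let mut sm = std::mem::zeroed::<cuda_types::CUdevResource>();
+    //     cuDeviceGetDevResource(dev, &mut sm, CU_DEV_RESOURCE_TYPE_SM);
+    //     let mut group = std::mem::zeroed::<cuda_types::CUdevResource>();
+    //     let mut nb_groups = 1u32;
+    //     cuDevSmResourceSplitByCount(&mut group, &mut nb_groups, &sm,
+    //                                 std::ptr::null_mut(), 0, min_sm_count);
+    //     let mut desc = std::mem::zeroed::<cuda_types::CUdevResourceDesc>();
+    //     cuDevResourceGenerateDesc(&mut desc, &mut group, 1);
+    //     let mut green = std::mem::zeroed::<cuda_types::CUgreenCtx>();
+    //     cuGreenCtxCreate(&mut green, desc, dev, CU_GREEN_CTX_DEFAULT_STREAM);
+    //     let mut ctx = std::mem::zeroed::<cuda_types::CUcontext>();
+    //     cuCtxFromGreenCtx(&mut ctx, green);
+    //     cuCtxSetCurrent(ctx);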
+ fn cuMemcpyAtoA_v2( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyHtoAAsync_v2( + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyAtoHAsync_v2( + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy2D_v2(pCopy: *const cuda_types::CUDA_MEMCPY2D) -> cuda_types::CUresult; + fn cuMemcpy2DUnaligned_v2( + pCopy: *const cuda_types::CUDA_MEMCPY2D, + ) -> cuda_types::CUresult; + fn cuMemcpy3D_v2(pCopy: *const cuda_types::CUDA_MEMCPY3D) -> cuda_types::CUresult; + fn cuMemcpyHtoDAsync_v2( + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoHAsync_v2( + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyDtoDAsync_v2( + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy2DAsync_v2( + pCopy: *const cuda_types::CUDA_MEMCPY2D, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy3DAsync_v2( + pCopy: *const cuda_types::CUDA_MEMCPY3D, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD8_v2( + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, + ) -> cuda_types::CUresult; + fn cuMemsetD16_v2( + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, + ) -> cuda_types::CUresult; + fn cuMemsetD32_v2( + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, + ) -> cuda_types::CUresult; + fn cuMemsetD2D8_v2( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, + ) -> cuda_types::CUresult; + fn cuMemsetD2D16_v2( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, + ) -> cuda_types::CUresult; + fn cuMemsetD2D32_v2( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, + ) -> cuda_types::CUresult; + fn cuMemcpy( + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyAsync( + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpyPeer( + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, + ) -> cuda_types::CUresult; + fn cuMemcpyPeerAsync( + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemcpy3DPeer( + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, + ) -> cuda_types::CUresult; + fn cuMemcpy3DPeerAsync( + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, + hStream: cuda_types::CUstream, + ) -> 
cuda_types::CUresult; + fn cuMemsetD8Async( + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD16Async( + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD32Async( + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD2D8Async( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD2D16Async( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemsetD2D32Async( + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuStreamGetPriority( + hStream: cuda_types::CUstream, + priority: *mut ::core::ffi::c_int, + ) -> cuda_types::CUresult; + fn cuStreamGetId( + hStream: cuda_types::CUstream, + streamId: *mut ::core::ffi::c_ulonglong, + ) -> cuda_types::CUresult; + fn cuStreamGetFlags( + hStream: cuda_types::CUstream, + flags: *mut ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamGetCtx( + hStream: cuda_types::CUstream, + pctx: *mut cuda_types::CUcontext, + ) -> cuda_types::CUresult; + fn cuStreamWaitEvent( + hStream: cuda_types::CUstream, + hEvent: cuda_types::CUevent, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamAddCallback( + hStream: cuda_types::CUstream, + callback: cuda_types::CUstreamCallback, + userData: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamAttachMemAsync( + hStream: cuda_types::CUstream, + dptr: cuda_types::CUdeviceptr, + length: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamQuery(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + fn cuStreamSynchronize(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + fn cuEventRecord( + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuEventRecordWithFlags( + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuLaunchKernel( + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + fn cuLaunchKernelEx( + config: *const cuda_types::CUlaunchConfig, + f: cuda_types::CUfunction, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + fn cuLaunchHostFunc( + hStream: cuda_types::CUstream, + fn_: cuda_types::CUhostFn, + userData: *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + fn cuGraphicsMapResources( + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn 
cuGraphicsUnmapResources( + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuStreamWriteValue32( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWaitValue32( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWriteValue64( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWaitValue64( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamBatchMemOp( + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWriteValue32_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWaitValue32_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWriteValue64_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWaitValue64_ptsz( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamBatchMemOp_ptsz( + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWriteValue32_v2( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWaitValue32_v2( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWriteValue64_v2( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamWaitValue64_v2( + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamBatchMemOp_v2( + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuMemPrefetchAsync( + devPtr: cuda_types::CUdeviceptr, + count: usize, + dstDevice: cuda_types::CUdevice, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemPrefetchAsync_v2( + devPtr: cuda_types::CUdeviceptr, + count: usize, + location: cuda_types::CUmemLocation, + flags: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuLaunchCooperativeKernel( + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: 
::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, + ) -> cuda_types::CUresult; + fn cuSignalExternalSemaphoresAsync( + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuWaitExternalSemaphoresAsync( + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuStreamBeginCapture(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + fn cuStreamBeginCapture_ptsz(hStream: cuda_types::CUstream) -> cuda_types::CUresult; + fn cuStreamBeginCapture_v2( + hStream: cuda_types::CUstream, + mode: cuda_types::CUstreamCaptureMode, + ) -> cuda_types::CUresult; + fn cuStreamBeginCaptureToGraph( + hStream: cuda_types::CUstream, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + mode: cuda_types::CUstreamCaptureMode, + ) -> cuda_types::CUresult; + fn cuStreamEndCapture( + hStream: cuda_types::CUstream, + phGraph: *mut cuda_types::CUgraph, + ) -> cuda_types::CUresult; + fn cuStreamIsCapturing( + hStream: cuda_types::CUstream, + captureStatus: *mut cuda_types::CUstreamCaptureStatus, + ) -> cuda_types::CUresult; + fn cuStreamGetCaptureInfo( + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + ) -> cuda_types::CUresult; + fn cuStreamGetCaptureInfo_ptsz( + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + ) -> cuda_types::CUresult; + fn cuStreamGetCaptureInfo_v2( + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + numDependencies_out: *mut usize, + ) -> cuda_types::CUresult; + fn cuStreamGetCaptureInfo_v3( + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + edgeData_out: *mut *const cuda_types::CUgraphEdgeData, + numDependencies_out: *mut usize, + ) -> cuda_types::CUresult; + fn cuGraphAddKernelNode( + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, + ) -> cuda_types::CUresult; + fn cuGraphKernelNodeGetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, + ) -> cuda_types::CUresult; + fn cuGraphKernelNodeSetParams( + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, + ) -> cuda_types::CUresult; + fn cuGraphExecKernelNodeSetParams( + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, + ) -> cuda_types::CUresult; + fn 
cuGraphInstantiateWithParams( + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS, + ) -> cuda_types::CUresult; + fn cuGraphExecUpdate( + hGraphExec: cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + hErrorNode_out: *mut cuda_types::CUgraphNode, + updateResult_out: *mut cuda_types::CUgraphExecUpdateResult, + ) -> cuda_types::CUresult; + fn cuGraphUpload( + hGraph: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuGraphLaunch( + hGraph: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuStreamCopyAttributes( + dstStream: cuda_types::CUstream, + srcStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuStreamGetAttribute( + hStream: cuda_types::CUstream, + attr: cuda_types::CUstreamAttrID, + value: *mut cuda_types::CUstreamAttrValue, + ) -> cuda_types::CUresult; + fn cuStreamSetAttribute( + hStream: cuda_types::CUstream, + attr: cuda_types::CUstreamAttrID, + param: *const cuda_types::CUstreamAttrValue, + ) -> cuda_types::CUresult; + fn cuIpcOpenMemHandle( + pdptr: *mut cuda_types::CUdeviceptr, + handle: cuda_types::CUipcMemHandle, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuGraphInstantiate( + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + phErrorNode: *mut cuda_types::CUgraphNode, + logBuffer: *mut ::core::ffi::c_char, + bufferSize: usize, + ) -> cuda_types::CUresult; + fn cuGraphInstantiate_v2( + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + phErrorNode: *mut cuda_types::CUgraphNode, + logBuffer: *mut ::core::ffi::c_char, + bufferSize: usize, + ) -> cuda_types::CUresult; + fn cuMemMapArrayAsync( + mapInfoList: *mut cuda_types::CUarrayMapInfo, + count: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemFreeAsync( + dptr: cuda_types::CUdeviceptr, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemAllocAsync( + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuMemAllocFromPoolAsync( + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + pool: cuda_types::CUmemoryPool, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuStreamUpdateCaptureDependencies( + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + numDependencies: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuStreamUpdateCaptureDependencies_v2( + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuGetProcAddress( + symbol: *const ::core::ffi::c_char, + pfn: *mut *mut ::core::ffi::c_void, + cudaVersion: ::core::ffi::c_int, + flags: cuda_types::cuuint64_t, + ) -> cuda_types::CUresult; + /** \brief Initialize the profiling. + + \deprecated + + Note that this function is deprecated and should not be used. + Starting with CUDA 12.0, it always returns error code ::CUDA_ERROR_NOT_SUPPORTED. + + Using this API user can initialize the CUDA profiler by specifying + the configuration file, output file and output file format. This + API is generally used to profile different set of counters by + looping the kernel launch. 
The \p configFile parameter can be used + to select profiling options including profiler counters. Refer to + the "Compute Command Line Profiler User Guide" for supported + profiler options and counters. + + Limitation: The CUDA profiler cannot be initialized with this API + if another profiling tool is already active, as indicated by the + ::CUDA_ERROR_PROFILER_DISABLED return code. + + Typical usage of the profiling APIs is as follows: + + for each set of counters/options\n + {\n + cuProfilerInitialize(); //Initialize profiling, set the counters or options in the config file \n + ...\n + cuProfilerStart(); \n + // code to be profiled \n + cuProfilerStop(); \n + ...\n + cuProfilerStart(); \n + // code to be profiled \n + cuProfilerStop(); \n + ...\n + }\n + + \param configFile - Name of the config file that lists the counters/options + for profiling. + \param outputFile - Name of the outputFile where the profiling results will + be stored. + \param outputMode - outputMode, can be ::CU_OUT_KEY_VALUE_PAIR or ::CU_OUT_CSV. + + \return + ::CUDA_ERROR_NOT_SUPPORTED + \notefnerr + + \sa + ::cuProfilerStart, + ::cuProfilerStop,*/ + fn cuProfilerInitialize( + configFile: *const ::core::ffi::c_char, + outputFile: *const ::core::ffi::c_char, + outputMode: cuda_types::CUoutput_mode, + ) -> cuda_types::CUresult; + /** \brief Enable profiling. + + Enables profile collection by the active profiling tool for the + current context. If profiling is already enabled, then + cuProfilerStart() has no effect. + + cuProfilerStart and cuProfilerStop APIs are used to + programmatically control the profiling granularity by allowing + profiling to be done only on selective pieces of code. + + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa + ::cuProfilerInitialize, + ::cuProfilerStop, + ::cudaProfilerStart*/ + fn cuProfilerStart() -> cuda_types::CUresult; + /** \brief Disable profiling. + + Disables profile collection by the active profiling tool for the + current context. If profiling is already disabled, then + cuProfilerStop() has no effect. + + cuProfilerStart and cuProfilerStop APIs are used to + programmatically control the profiling granularity by allowing + profiling to be done only on selective pieces of code. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_CONTEXT + \notefnerr + + \sa + ::cuProfilerInitialize, + ::cuProfilerStart, + ::cudaProfilerStop*/ + fn cuProfilerStop() -> cuda_types::CUresult; + /** \brief Registers an OpenGL buffer object + + Registers the buffer object specified by \p buffer for access by + CUDA. A handle to the registered object is returned as \p + pCudaResource. The register flags \p Flags specify the intended usage, + as follows: + + - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA. This is the default value. + - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA + will not write to this resource. + - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that + CUDA will not read from this resource and will write over the + entire contents of the resource, so none of the data previously + stored in the resource will be preserved. 
+ + \param pCudaResource - Pointer to the returned object handle + \param buffer - name of buffer object to be registered + \param Flags - Register flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_OPERATING_SYSTEM + \notefnerr + + \sa + ::cuGraphicsUnregisterResource, + ::cuGraphicsMapResources, + ::cuGraphicsResourceGetMappedPointer, + ::cudaGraphicsGLRegisterBuffer*/ + fn cuGraphicsGLRegisterBuffer( + pCudaResource: *mut cuda_types::CUgraphicsResource, + buffer: cuda_types::GLuint, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Register an OpenGL texture or renderbuffer object + + Registers the texture or renderbuffer object specified by \p image for access by CUDA. + A handle to the registered object is returned as \p pCudaResource. + + \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D, + ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY, + or ::GL_RENDERBUFFER. + + The register flags \p Flags specify the intended usage, as follows: + + - ::CU_GRAPHICS_REGISTER_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA. This is the default value. + - ::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: Specifies that CUDA + will not write to this resource. + - ::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: Specifies that + CUDA will not read from this resource and will write over the + entire contents of the resource, so none of the data previously + stored in the resource will be preserved. + - ::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: Specifies that CUDA will + bind this resource to a surface reference. + - ::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: Specifies that CUDA will perform + texture gather operations on this resource. + + The following image formats are supported. For brevity's sake, the list is abbreviated. + For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats + {GL_R8, GL_R16, GL_RG8, GL_RG16} : + - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY + - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I} + - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X + {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT} + + The following image classes are currently disallowed: + - Textures with borders + - Multisampled renderbuffers + + \param pCudaResource - Pointer to the returned object handle + \param image - name of texture or renderbuffer object to be registered + \param target - Identifies the type of object specified by \p image + \param Flags - Register flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_OPERATING_SYSTEM + \notefnerr + + \sa + ::cuGraphicsUnregisterResource, + ::cuGraphicsMapResources, + ::cuGraphicsSubResourceGetMappedArray, + ::cudaGraphicsGLRegisterImage*/ + fn cuGraphicsGLRegisterImage( + pCudaResource: *mut cuda_types::CUgraphicsResource, + image: cuda_types::GLuint, + target: cuda_types::GLenum, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Gets the CUDA devices associated with the current OpenGL context + + Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices + corresponding to the current OpenGL context. 
Also returns in \p *pCudaDevices + at most cudaDeviceCount of the CUDA-compatible devices corresponding to + the current OpenGL context. If any of the GPUs being used by the current OpenGL + context are not CUDA capable then the call will return CUDA_ERROR_NO_DEVICE. + + The \p deviceList argument may be any of the following: + - ::CU_GL_DEVICE_LIST_ALL: Query all devices used by the current OpenGL context. + - ::CU_GL_DEVICE_LIST_CURRENT_FRAME: Query the devices used by the current OpenGL context to + render the current frame (in SLI). + - ::CU_GL_DEVICE_LIST_NEXT_FRAME: Query the devices used by the current OpenGL context to + render the next frame (in SLI). Note that this is a prediction, it can't be guaranteed that + this is correct in all cases. + + \param pCudaDeviceCount - Returned number of CUDA devices. + \param pCudaDevices - Returned CUDA devices. + \param cudaDeviceCount - The size of the output device array pCudaDevices. + \param deviceList - The set of devices to return. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NO_DEVICE, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_GRAPHICS_CONTEXT, + ::CUDA_ERROR_OPERATING_SYSTEM + + \notefnerr + + \sa + ::cuWGLGetDevice, + ::cudaGLGetDevices*/ + fn cuGLGetDevices_v2( + pCudaDeviceCount: *mut ::core::ffi::c_uint, + pCudaDevices: *mut cuda_types::CUdevice, + cudaDeviceCount: ::core::ffi::c_uint, + deviceList: cuda_types::CUGLDeviceList, + ) -> cuda_types::CUresult; + /** \brief Create a CUDA context for interoperability with OpenGL + + \deprecated This function is deprecated as of Cuda 5.0. + + This function is deprecated and should no longer be used. It is + no longer necessary to associate a CUDA context with an OpenGL + context in order to achieve maximum interoperability performance. + + \param pCtx - Returned CUDA context + \param Flags - Options for CUDA context creation + \param device - Device on which to create the context + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuCtxCreate, ::cuGLInit, ::cuGLMapBufferObject, + ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync, + ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags, + ::cuWGLGetDevice*/ + fn cuGLCtxCreate_v2( + pCtx: *mut cuda_types::CUcontext, + Flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + /** \brief Initializes OpenGL interoperability + + \deprecated This function is deprecated as of Cuda 3.0. + + Initializes OpenGL interoperability. This function is deprecated + and calling it is no longer required. It may fail if the needed + OpenGL driver facilities are not available. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_UNKNOWN + \notefnerr + + \sa ::cuGLMapBufferObject, + ::cuGLRegisterBufferObject, ::cuGLUnmapBufferObject, + ::cuGLUnregisterBufferObject, ::cuGLMapBufferObjectAsync, + ::cuGLUnmapBufferObjectAsync, ::cuGLSetBufferObjectMapFlags, + ::cuWGLGetDevice*/ + fn cuGLInit() -> cuda_types::CUresult; + /** \brief Registers an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. + + Registers the buffer object specified by \p buffer for access by + CUDA. This function must be called before CUDA can map the buffer + object. 
There must be a valid OpenGL context bound to the current + thread when this function is called, and the buffer name is + resolved by that context. + + \param buffer - The name of the buffer object to register. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_ALREADY_MAPPED + \notefnerr + + \sa ::cuGraphicsGLRegisterBuffer*/ + fn cuGLRegisterBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult; + /** \brief Maps an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. + + Maps the buffer object specified by \p buffer into the address space of the + current CUDA context and returns in \p *dptr and \p *size the base pointer + and size of the resulting mapping. + + There must be a valid OpenGL context bound to the current thread + when this function is called. This must be the same context, or a + member of the same shareGroup, as the context that was bound when + the buffer was registered. + + All streams in the current CUDA context are synchronized with the + current GL context. + + \param dptr - Returned mapped base pointer + \param size - Returned size of mapping + \param buffer - The name of the buffer object to map + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_MAP_FAILED + \notefnerr + + \sa ::cuGraphicsMapResources*/ + fn cuGLMapBufferObject_v2_ptds( + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, + ) -> cuda_types::CUresult; + /** \brief Unmaps an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. + + Unmaps the buffer object specified by \p buffer for access by CUDA. + + There must be a valid OpenGL context bound to the current thread + when this function is called. This must be the same context, or a + member of the same shareGroup, as the context that was bound when + the buffer was registered. + + All streams in the current CUDA context are synchronized with the + current GL context. + + \param buffer - Buffer object to unmap + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuGraphicsUnmapResources*/ + fn cuGLUnmapBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult; + /** \brief Unregister an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. + + Unregisters the buffer object specified by \p buffer. This + releases any resources associated with the registered buffer. + After this call, the buffer may no longer be mapped for access by + CUDA. + + There must be a valid OpenGL context bound to the current thread + when this function is called. This must be the same context, or a + member of the same shareGroup, as the context that was bound when + the buffer was registered. + + \param buffer - Name of the buffer object to unregister + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuGraphicsUnregisterResource*/ + fn cuGLUnregisterBufferObject(buffer: cuda_types::GLuint) -> cuda_types::CUresult; + /** \brief Set the map flags for an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. 
+ + Sets the map flags for the buffer object specified by \p buffer. + + Changes to \p Flags will take effect the next time \p buffer is mapped. + The \p Flags argument may be any of the following: + - ::CU_GL_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA kernels. This is the default value. + - ::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA kernels which + access this resource will not write to this resource. + - ::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that CUDA kernels + which access this resource will not read from this resource and will + write over the entire contents of the resource, so none of the data + previously stored in the resource will be preserved. + + If \p buffer has not been registered for use with CUDA, then + ::CUDA_ERROR_INVALID_HANDLE is returned. If \p buffer is presently + mapped for access by CUDA, then ::CUDA_ERROR_ALREADY_MAPPED is returned. + + There must be a valid OpenGL context bound to the current thread + when this function is called. This must be the same context, or a + member of the same shareGroup, as the context that was bound when + the buffer was registered. + + \param buffer - Buffer object to unmap + \param Flags - Map flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_INVALID_CONTEXT, + \notefnerr + + \sa ::cuGraphicsResourceSetMapFlags*/ + fn cuGLSetBufferObjectMapFlags( + buffer: cuda_types::GLuint, + Flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Maps an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. + + Maps the buffer object specified by \p buffer into the address space of the + current CUDA context and returns in \p *dptr and \p *size the base pointer + and size of the resulting mapping. + + There must be a valid OpenGL context bound to the current thread + when this function is called. This must be the same context, or a + member of the same shareGroup, as the context that was bound when + the buffer was registered. + + Stream \p hStream in the current CUDA context is synchronized with + the current GL context. + + \param dptr - Returned mapped base pointer + \param size - Returned size of mapping + \param buffer - The name of the buffer object to map + \param hStream - Stream to synchronize + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_MAP_FAILED + \notefnerr + + \sa ::cuGraphicsMapResources*/ + fn cuGLMapBufferObjectAsync_v2_ptsz( + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Unmaps an OpenGL buffer object + + \deprecated This function is deprecated as of Cuda 3.0. + + Unmaps the buffer object specified by \p buffer for access by CUDA. + + There must be a valid OpenGL context bound to the current thread + when this function is called. This must be the same context, or a + member of the same shareGroup, as the context that was bound when + the buffer was registered. + + Stream \p hStream in the current CUDA context is synchronized with + the current GL context. 
+ + \param buffer - Name of the buffer object to unmap + \param hStream - Stream to synchronize + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuGraphicsUnmapResources*/ + fn cuGLUnmapBufferObjectAsync( + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuGLGetDevices( + pCudaDeviceCount: *mut ::core::ffi::c_uint, + pCudaDevices: *mut cuda_types::CUdevice, + cudaDeviceCount: ::core::ffi::c_uint, + deviceList: cuda_types::CUGLDeviceList, + ) -> cuda_types::CUresult; + fn cuGLMapBufferObject_v2( + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, + ) -> cuda_types::CUresult; + fn cuGLMapBufferObjectAsync_v2( + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + fn cuGLCtxCreate( + pCtx: *mut cuda_types::CUcontext, + Flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, + ) -> cuda_types::CUresult; + fn cuGLMapBufferObject( + dptr: *mut cuda_types::CUdeviceptr_v1, + size: *mut ::core::ffi::c_uint, + buffer: cuda_types::GLuint, + ) -> cuda_types::CUresult; + fn cuGLMapBufferObjectAsync( + dptr: *mut cuda_types::CUdeviceptr_v1, + size: *mut ::core::ffi::c_uint, + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Registers an EGL image + + Registers the EGLImageKHR specified by \p image for access by + CUDA. A handle to the registered object is returned as \p pCudaResource. + Additional Mapping/Unmapping is not required for the registered resource and + ::cuGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource. + + The application will be responsible for synchronizing access to shared objects. + The application must ensure that any pending operation which access the objects have completed + before passing control to CUDA. This may be accomplished by issuing and waiting for + glFinish command on all GLcontexts (for OpenGL and likewise for other APIs). + The application will be also responsible for ensuring that any pending operation on the + registered CUDA resource has completed prior to executing subsequent commands in other APIs + accesing the same memory objects. + This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably). + + The surface's intended usage is specified using \p flags, as follows: + + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA. This is the default value. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + will not write to this resource. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + CUDA will not read from this resource and will write over the + entire contents of the resource, so none of the data previously + stored in the resource will be preserved. + + The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer. + typedef void* EGLImageKHR + + \param pCudaResource - Pointer to the returned object handle + \param image - An EGLImageKHR image which can be used to create target resource. 
+ \param flags - Map flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_INVALID_CONTEXT, + + \sa ::cuGraphicsEGLRegisterImage, ::cuGraphicsUnregisterResource, + ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + ::cuGraphicsUnmapResources, + ::cudaGraphicsEGLRegisterImage*/ + fn cuGraphicsEGLRegisterImage( + pCudaResource: *mut cuda_types::CUgraphicsResource, + image: cuda_types::EGLImageKHR, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Connect CUDA to EGLStream as a consumer. + + Connect CUDA as a consumer to EGLStreamKHR specified by \p stream. + + The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + API to another. + + \param conn - Pointer to the returned connection handle + \param stream - EGLStreamKHR handle + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_CONTEXT, + + \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + ::cudaEGLStreamConsumerConnect*/ + fn cuEGLStreamConsumerConnect( + conn: *mut cuda_types::CUeglStreamConnection, + stream: cuda_types::EGLStreamKHR, + ) -> cuda_types::CUresult; + /** \brief Connect CUDA to EGLStream as a consumer with given flags. + + Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by CUeglResourceLocationFlags. + + The flags specify whether the consumer wants to access frames from system memory or video memory. + Default is ::CU_EGL_RESOURCE_LOCATION_VIDMEM. + + \param conn - Pointer to the returned connection handle + \param stream - EGLStreamKHR handle + \param flags - Flags denote intended location - system or video. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_CONTEXT, + + \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + ::cudaEGLStreamConsumerConnectWithFlags*/ + fn cuEGLStreamConsumerConnectWithFlags( + conn: *mut cuda_types::CUeglStreamConnection, + stream: cuda_types::EGLStreamKHR, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Disconnect CUDA as a consumer to EGLStream . + + Disconnect CUDA as a consumer to EGLStreamKHR. + + \param conn - Conection to disconnect. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_CONTEXT, + + \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + ::cudaEGLStreamConsumerDisconnect*/ + fn cuEGLStreamConsumerDisconnect( + conn: *mut cuda_types::CUeglStreamConnection, + ) -> cuda_types::CUresult; + /** \brief Acquire an image frame from the EGLStream with CUDA as a consumer. + + Acquire an image frame from EGLStreamKHR. This API can also acquire an old frame presented + by the producer unless explicitly disabled by setting EGL_SUPPORT_REUSE_NV flag to EGL_FALSE + during stream initialization. By default, EGLStream is created with this flag set to EGL_TRUE. + ::cuGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get + ::CUeglFrame. + + \param conn - Connection on which to acquire + \param pCudaResource - CUDA resource on which the stream frame will be mapped for use. + \param pStream - CUDA stream for synchronization and any data migrations + implied by ::CUeglResourceLocationFlags. 
+ \param timeout - Desired timeout in usec for a new frame to be acquired. + If set as ::CUDA_EGL_INFINITE_TIMEOUT, acquire waits infinitely. + After timeout occurs CUDA consumer tries to acquire an old frame + if available and EGL_SUPPORT_REUSE_NV flag is set. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_LAUNCH_TIMEOUT, + + \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + ::cudaEGLStreamConsumerAcquireFrame*/ + fn cuEGLStreamConsumerAcquireFrame( + conn: *mut cuda_types::CUeglStreamConnection, + pCudaResource: *mut cuda_types::CUgraphicsResource, + pStream: *mut cuda_types::CUstream, + timeout: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Releases the last frame acquired from the EGLStream. + + Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR. + If EGL_SUPPORT_REUSE_NV flag is set to EGL_TRUE, at the time of EGL creation + this API doesn't release the last frame acquired on the EGLStream. + By default, EGLStream is created with this flag set to EGL_TRUE. + + \param conn - Connection on which to release + \param pCudaResource - CUDA resource whose corresponding frame is to be released + \param pStream - CUDA stream on which release will be done. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + + \sa ::cuEGLStreamConsumerConnect, ::cuEGLStreamConsumerDisconnect, + ::cuEGLStreamConsumerAcquireFrame, ::cuEGLStreamConsumerReleaseFrame, + ::cudaEGLStreamConsumerReleaseFrame*/ + fn cuEGLStreamConsumerReleaseFrame( + conn: *mut cuda_types::CUeglStreamConnection, + pCudaResource: cuda_types::CUgraphicsResource, + pStream: *mut cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Connect CUDA to EGLStream as a producer. + + Connect CUDA as a producer to EGLStreamKHR specified by \p stream. + + The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one + API to another. + + \param conn - Pointer to the returned connection handle + \param stream - EGLStreamKHR handle + \param width - width of the image to be submitted to the stream + \param height - height of the image to be submitted to the stream + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_CONTEXT, + + \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + ::cuEGLStreamProducerPresentFrame, + ::cudaEGLStreamProducerConnect*/ + fn cuEGLStreamProducerConnect( + conn: *mut cuda_types::CUeglStreamConnection, + stream: cuda_types::EGLStreamKHR, + width: cuda_types::EGLint, + height: cuda_types::EGLint, + ) -> cuda_types::CUresult; + /** \brief Disconnect CUDA as a producer to EGLStream . + + Disconnect CUDA as a producer to EGLStreamKHR. + + \param conn - Conection to disconnect. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_INVALID_CONTEXT, + + \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + ::cuEGLStreamProducerPresentFrame, + ::cudaEGLStreamProducerDisconnect*/ + fn cuEGLStreamProducerDisconnect( + conn: *mut cuda_types::CUeglStreamConnection, + ) -> cuda_types::CUresult; + /** \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer. + + When a frame is presented by the producer, it gets associated with the EGLStream + and thus it is illegal to free the frame before the producer is disconnected. + If a frame is freed and reused it may lead to undefined behavior. 
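 (Editorial sketch, not part of the generated bindings: a minimal illustration of the lifetime rule above, assuming these declarations are reachable as plain `unsafe extern "system"` functions; frame setup and error handling are omitted.)

```rust
// Hypothetical helper showing only the ordering constraint: the memory behind
// `frame` must stay valid from PresentFrame until the producer disconnects
// (or until the consumer returns the frame).
unsafe fn present_and_teardown(
    conn: *mut cuda_types::CUeglStreamConnection,
    frame: cuda_types::CUeglFrame,
    stream: *mut cuda_types::CUstream,
) {
    let _ = cuEGLStreamProducerPresentFrame(conn, frame, stream);
    // ... consumer acquires and releases the frame here ...
    let _ = cuEGLStreamProducerDisconnect(conn);
    // Only at this point may the allocation referenced by `frame` be freed.
}
```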
+ + If producer and consumer are on different GPUs (iGPU and dGPU) then frametype + ::CU_EGL_FRAME_TYPE_ARRAY is not supported. ::CU_EGL_FRAME_TYPE_PITCH can be used for + such cross-device applications. + + The ::CUeglFrame is defined as: + \code + typedef struct CUeglFrame_st { + union { + CUarray pArray[MAX_PLANES]; + void* pPitch[MAX_PLANES]; + } frame; + unsigned int width; + unsigned int height; + unsigned int depth; + unsigned int pitch; + unsigned int planeCount; + unsigned int numChannels; + CUeglFrameType frameType; + CUeglColorFormat eglColorFormat; + CUarray_format cuFormat; + } CUeglFrame; + \endcode + + For ::CUeglFrame of type ::CU_EGL_FRAME_TYPE_PITCH, the application may present sub-region of a memory + allocation. In that case, the pitched pointer will specify the start address of the sub-region in + the allocation and corresponding ::CUeglFrame fields will specify the dimensions of the sub-region. + + \param conn - Connection on which to present the CUDA array + \param eglframe - CUDA Eglstream Proucer Frame handle to be sent to the consumer over EglStream. + \param pStream - CUDA stream on which to present the frame. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + + \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + ::cuEGLStreamProducerReturnFrame, + ::cudaEGLStreamProducerPresentFrame*/ + fn cuEGLStreamProducerPresentFrame( + conn: *mut cuda_types::CUeglStreamConnection, + eglframe: cuda_types::CUeglFrame, + pStream: *mut cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Return the CUDA eglFrame to the EGLStream released by the consumer. + + This API can potentially return CUDA_ERROR_LAUNCH_TIMEOUT if the consumer has not + returned a frame to EGL stream. If timeout is returned the application can retry. + + \param conn - Connection on which to return + \param eglframe - CUDA Eglstream Proucer Frame handle returned from the consumer over EglStream. + \param pStream - CUDA stream on which to return the frame. + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_LAUNCH_TIMEOUT + + \sa ::cuEGLStreamProducerConnect, ::cuEGLStreamProducerDisconnect, + ::cuEGLStreamProducerPresentFrame, + ::cudaEGLStreamProducerReturnFrame*/ + fn cuEGLStreamProducerReturnFrame( + conn: *mut cuda_types::CUeglStreamConnection, + eglframe: *mut cuda_types::CUeglFrame, + pStream: *mut cuda_types::CUstream, + ) -> cuda_types::CUresult; + /** \brief Get an eglFrame through which to access a registered EGL graphics resource. + + Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource + \p resource may be accessed. + This API can only be called for registered EGL graphics resources. + + The ::CUeglFrame is defined as: + \code + typedef struct CUeglFrame_st { + union { + CUarray pArray[MAX_PLANES]; + void* pPitch[MAX_PLANES]; + } frame; + unsigned int width; + unsigned int height; + unsigned int depth; + unsigned int pitch; + unsigned int planeCount; + unsigned int numChannels; + CUeglFrameType frameType; + CUeglColorFormat eglColorFormat; + CUarray_format cuFormat; + } CUeglFrame; + \endcode + + If \p resource is not registered then ::CUDA_ERROR_NOT_MAPPED is returned. + * + \param eglFrame - Returned eglFrame. + \param resource - Registered resource to access. + \param index - Index for cubemap surfaces. + \param mipLevel - Mipmap level for the subresource to access. 
+ + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_NOT_MAPPED + + \sa + ::cuGraphicsMapResources, + ::cuGraphicsSubResourceGetMappedArray, + ::cuGraphicsResourceGetMappedPointer, + ::cudaGraphicsResourceGetMappedEglFrame*/ + fn cuGraphicsResourceGetMappedEglFrame( + eglFrame: *mut cuda_types::CUeglFrame, + resource: cuda_types::CUgraphicsResource, + index: ::core::ffi::c_uint, + mipLevel: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Creates an event from EGLSync object + + Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified + via \p flags. Valid flags include: + - ::CU_EVENT_DEFAULT: Default event creation flag. + - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking + synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on + an event created with this flag will block until the event has actually + been completed. + + Once the \p eglSync gets destroyed, ::cuEventDestroy is the only API + that can be invoked on the event. + + ::cuEventRecord and TimingData are not supported for events created from EGLSync. + + The EGLSyncKHR is an opaque handle to an EGL sync object. + typedef void* EGLSyncKHR + + \param phEvent - Returns newly created event + \param eglSync - Opaque handle to EGLSync object + \param flags - Event creation flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + + \sa + ::cuEventQuery, + ::cuEventSynchronize, + ::cuEventDestroy*/ + fn cuEventCreateFromEGLSync( + phEvent: *mut cuda_types::CUevent, + eglSync: cuda_types::EGLSyncKHR, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Gets the CUDA device associated with a VDPAU device + + Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if + applicable. + + \param pDevice - Device associated with vdpDevice + \param vdpDevice - A VdpDevice handle + \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE + \notefnerr + + \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface, + ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource, + ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + ::cudaVDPAUGetDevice*/ + fn cuVDPAUGetDevice( + pDevice: *mut cuda_types::CUdevice, + vdpDevice: cuda_types::VdpDevice, + vdpGetProcAddress: cuda_types::VdpGetProcAddress, + ) -> cuda_types::CUresult; + /** \brief Create a CUDA context for interoperability with VDPAU + + Creates a new CUDA context, initializes VDPAU interoperability, and + associates the CUDA context with the calling thread. It must be called + before performing any other VDPAU interoperability operations. It may fail + if the needed VDPAU driver facilities are not available. For usage of the + \p flags parameter, see ::cuCtxCreate(). 
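 (Editorial sketch, not upstream header text: a rough call-order illustration built from the declarations in this file; the literal `0` flag stands for ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE and error handling is omitted.)

```rust
// Hypothetical setup flow: find the CUDA device backing the VdpDevice,
// create the interop context, then register a video surface for mapping.
unsafe fn vdpau_interop_setup(
    vdp_device: cuda_types::VdpDevice,
    vdp_get_proc_address: cuda_types::VdpGetProcAddress,
    surface: cuda_types::VdpVideoSurface,
) -> cuda_types::CUresult {
    let mut dev: cuda_types::CUdevice = 0;
    let _ = cuVDPAUGetDevice(&mut dev, vdp_device, vdp_get_proc_address);
    let mut ctx = cuda_types::CUcontext(core::ptr::null_mut());
    let _ = cuVDPAUCtxCreate_v2(&mut ctx, 0, dev, vdp_device, vdp_get_proc_address);
    let mut resource: cuda_types::CUgraphicsResource = core::ptr::null_mut();
    cuGraphicsVDPAURegisterVideoSurface(&mut resource, surface, 0)
}
```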
+ + \param pCtx - Returned CUDA context + \param flags - Options for CUDA context creation + \param device - Device on which to create the context + \param vdpDevice - The VdpDevice to interop with + \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_DEINITIALIZED, + ::CUDA_ERROR_NOT_INITIALIZED, + ::CUDA_ERROR_INVALID_CONTEXT, + ::CUDA_ERROR_INVALID_VALUE, + ::CUDA_ERROR_OUT_OF_MEMORY + \notefnerr + + \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface, + ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource, + ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + ::cuVDPAUGetDevice*/ + fn cuVDPAUCtxCreate_v2( + pCtx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, + vdpDevice: cuda_types::VdpDevice, + vdpGetProcAddress: cuda_types::VdpGetProcAddress, + ) -> cuda_types::CUresult; + /** \brief Registers a VDPAU VdpVideoSurface object + + Registers the VdpVideoSurface specified by \p vdpSurface for access by + CUDA. A handle to the registered object is returned as \p pCudaResource. + The surface's intended usage is specified using \p flags, as follows: + + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA. This is the default value. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + will not write to this resource. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + CUDA will not read from this resource and will write over the + entire contents of the resource, so none of the data previously + stored in the resource will be preserved. + + The VdpVideoSurface is presented as an array of subresources that may be + accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray. + The exact number of valid \p arrayIndex values depends on the VDPAU surface + format. The mapping is shown in the table below. \p mipLevel must be 0. + + \htmlonly + + + + + + + + + + +
 VdpChromaType        arrayIndex  Size       Format  Content
 VDP_CHROMA_TYPE_420  0           w x h/2    R8      Top-field luma
                      1           w x h/2    R8      Bottom-field luma
                      2           w/2 x h/4  R8G8    Top-field chroma
                      3           w/2 x h/4  R8G8    Bottom-field chroma
 VDP_CHROMA_TYPE_422  0           w x h/2    R8      Top-field luma
                      1           w x h/2    R8      Bottom-field luma
                      2           w/2 x h/2  R8G8    Top-field chroma
                      3           w/2 x h/2  R8G8    Bottom-field chroma
+ \endhtmlonly + + \latexonly + \begin{tabular}{|l|l|l|l|l|} + \hline + VdpChromaType & arrayIndex & Size & Format & Content \\ + \hline + VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\ + & 1 & w x h/2 & R8 & Bottom-field luma \\ + & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\ + & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\ + \hline + VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\ + & 1 & w x h/2 & R8 & Bottom-field luma \\ + & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\ + & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\ + \hline + \end{tabular} + \endlatexonly + + \param pCudaResource - Pointer to the returned object handle + \param vdpSurface - The VdpVideoSurface to be registered + \param flags - Map flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_INVALID_CONTEXT, + \notefnerr + + \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, + ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource, + ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + ::cuVDPAUGetDevice, + ::cudaGraphicsVDPAURegisterVideoSurface*/ + fn cuGraphicsVDPAURegisterVideoSurface( + pCudaResource: *mut cuda_types::CUgraphicsResource, + vdpSurface: cuda_types::VdpVideoSurface, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + /** \brief Registers a VDPAU VdpOutputSurface object + + Registers the VdpOutputSurface specified by \p vdpSurface for access by + CUDA. A handle to the registered object is returned as \p pCudaResource. + The surface's intended usage is specified using \p flags, as follows: + + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this + resource will be used. It is therefore assumed that this resource will be + read from and written to by CUDA. This is the default value. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA + will not write to this resource. + - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that + CUDA will not read from this resource and will write over the + entire contents of the resource, so none of the data previously + stored in the resource will be preserved. + + The VdpOutputSurface is presented as an array of subresources that may be + accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray. + The exact number of valid \p arrayIndex values depends on the VDPAU surface + format. The mapping is shown in the table below. \p mipLevel must be 0. + + \htmlonly + + + + +
 VdpRGBAFormat                arrayIndex  Size   Format   Content
 VDP_RGBA_FORMAT_B8G8R8A8     0           w x h  ARGB8    Entire surface
 VDP_RGBA_FORMAT_R10G10B10A2  0           w x h  A2BGR10  Entire surface
+ \endhtmlonly + + \latexonly + \begin{tabular}{|l|l|l|l|l|} + \hline + VdpRGBAFormat & arrayIndex & Size & Format & Content \\ + \hline + VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\ + VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\ + \hline + \end{tabular} + \endlatexonly + + \param pCudaResource - Pointer to the returned object handle + \param vdpSurface - The VdpOutputSurface to be registered + \param flags - Map flags + + \return + ::CUDA_SUCCESS, + ::CUDA_ERROR_INVALID_HANDLE, + ::CUDA_ERROR_ALREADY_MAPPED, + ::CUDA_ERROR_INVALID_CONTEXT, + \notefnerr + + \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, + ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource, + ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources, + ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray, + ::cuVDPAUGetDevice, + ::cudaGraphicsVDPAURegisterOutputSurface*/ + fn cuGraphicsVDPAURegisterOutputSurface( + pCudaResource: *mut cuda_types::CUgraphicsResource, + vdpSurface: cuda_types::VdpOutputSurface, + flags: ::core::ffi::c_uint, + ) -> cuda_types::CUresult; + fn cuVDPAUCtxCreate( + pCtx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, + vdpDevice: cuda_types::VdpDevice, + vdpGetProcAddress: cuda_types::VdpGetProcAddress, + ) -> cuda_types::CUresult; +} diff --git a/cuda_base/src/lib.rs b/cuda_base/src/lib.rs index 3f6f779a..833d3721 100644 --- a/cuda_base/src/lib.rs +++ b/cuda_base/src/lib.rs @@ -1,110 +1,25 @@ extern crate proc_macro; -use std::collections::hash_map; -use std::iter; - use proc_macro::TokenStream; use proc_macro2::Span; -use quote::{format_ident, quote, ToTokens}; -use rustc_hash::{FxHashMap, FxHashSet}; +use quote::{quote, ToTokens}; +use rustc_hash::FxHashMap; +use std::iter; use syn::parse::{Parse, ParseStream}; use syn::punctuated::Punctuated; use syn::visit_mut::VisitMut; use syn::{ - bracketed, parse_macro_input, Abi, Fields, File, FnArg, ForeignItem, ForeignItemFn, Ident, - Item, ItemForeignMod, LitStr, PatType, Path, PathArguments, PathSegment, ReturnType, Signature, - Token, Type, TypeArray, TypePath, TypePtr, + bracketed, parse_macro_input, File, ForeignItem, ForeignItemFn, Ident, Item, Path, Signature, + Token, }; const CUDA_RS: &'static str = include_str! {"cuda.rs"}; -// This macro copies cuda.rs as-is with some changes: -// * All function declarations are filtered out -// * CUdeviceptr_v2 is redefined from `unsigned long long` to `*void` -// * `extern "C"` gets replaced by `extern "system"` -// * CUuuid_st is redefined to use uchar instead of char -#[proc_macro] -pub fn cuda_type_declarations(_: TokenStream) -> TokenStream { - let mut cuda_module = syn::parse_str::(CUDA_RS).unwrap(); - cuda_module.items = cuda_module - .items - .into_iter() - .filter_map(|item| match item { - Item::ForeignMod(_) => None, - Item::Struct(mut struct_) => { - if "CUdeviceptr_v2" == struct_.ident.to_string() { - match &mut struct_.fields { - Fields::Unnamed(ref mut fields) => { - fields.unnamed[0].ty = - absolute_path_to_mut_ptr(&["std", "os", "raw", "c_void"]) - } - _ => unreachable!(), - } - } else if "CUuuid_st" == struct_.ident.to_string() { - match &mut struct_.fields { - Fields::Named(ref mut fields) => match fields.named[0].ty { - Type::Array(TypeArray { ref mut elem, .. 
}) => { - *elem = Box::new(Type::Path(TypePath { - qself: None, - path: segments_to_path(&["std", "os", "raw", "c_uchar"]), - })) - } - _ => unreachable!(), - }, - _ => panic!(), - } - } - Some(Item::Struct(struct_)) - } - i => Some(i), - }) - .collect::>(); - syn::visit_mut::visit_file_mut(&mut FixAbi, &mut cuda_module); - cuda_module.into_token_stream().into() -} - -fn segments_to_path(path: &[&'static str]) -> Path { - let mut segments = Punctuated::new(); - for ident in path { - let ident = PathSegment { - ident: Ident::new(ident, Span::call_site()), - arguments: PathArguments::None, - }; - segments.push(ident); - } - Path { - leading_colon: Some(Token![::](Span::call_site())), - segments, - } -} - -fn absolute_path_to_mut_ptr(path: &[&'static str]) -> Type { - Type::Ptr(TypePtr { - star_token: Token![*](Span::call_site()), - const_token: None, - mutability: Some(Token![mut](Span::call_site())), - elem: Box::new(Type::Path(TypePath { - qself: None, - path: segments_to_path(path), - })), - }) -} - -struct FixAbi; - -impl VisitMut for FixAbi { - fn visit_abi_mut(&mut self, i: &mut Abi) { - if let Some(ref mut name) = i.name { - *name = LitStr::new("system", Span::call_site()); - } - } -} - // This macro accepts following arguments: -// * `type_path`: path to the module with type definitions (in the module tree) // * `normal_macro`: ident for a normal macro -// * `override_macro`: ident for an override macro -// * `override_fns`: list of override functions +// * zero or more: +// * `override_macro`: ident for an override macro +// * `override_fns`: list of override functions // Then macro goes through every function in rust.rs, and for every fn `foo`: // * if `foo` is contained in `override_fns` then pass it into `override_macro` // * if `foo` is not contained in `override_fns` pass it to `normal_macro` @@ -117,390 +32,191 @@ impl VisitMut for FixAbi { #[proc_macro] pub fn cuda_function_declarations(tokens: TokenStream) -> TokenStream { let input = parse_macro_input!(tokens as FnDeclInput); - let cuda_module = syn::parse_str::(CUDA_RS).unwrap(); - let override_fns = input - .override_fns - .iter() - .map(ToString::to_string) - .collect::>(); - let (normal_macro_args, override_macro_args): (Vec<_>, Vec<_>) = cuda_module - .items - .into_iter() - .filter_map(|item| match item { - Item::ForeignMod(ItemForeignMod { mut items, .. }) => match items.pop().unwrap() { - ForeignItem::Fn(ForeignItemFn { - sig: - Signature { - ident, - inputs, - output, - .. - }, - .. - }) => { - let use_normal_macro = !override_fns.contains(&ident.to_string()); - let inputs = inputs - .into_iter() - .map(|fn_arg| match fn_arg { - FnArg::Typed(mut pat_type) => { - pat_type.ty = - prepend_cuda_path_to_type(&input.type_path, pat_type.ty); - FnArg::Typed(pat_type) - } - _ => unreachable!(), - }) - .collect::>(); - let output = match output { - ReturnType::Type(_, type_) => type_, - ReturnType::Default => unreachable!(), - }; - let type_path = input.type_path.clone(); - Some(( - quote! { - "system" fn #ident(#inputs) -> #type_path :: #output - }, - use_normal_macro, - )) - } - _ => unreachable!(), - }, - _ => None, - }) - .partition(|(_, use_normal_macro)| *use_normal_macro); - let mut result = proc_macro2::TokenStream::new(); - if !normal_macro_args.is_empty() { - let punctuated_normal_macro_args = to_punctuated::(normal_macro_args); - let macro_ = &input.normal_macro; - result.extend(iter::once(quote! { - #macro_ ! 
(#punctuated_normal_macro_args); - })); - } - if !override_macro_args.is_empty() { - let punctuated_override_macro_args = to_punctuated::(override_macro_args); - let macro_ = &input.override_macro; - result.extend(iter::once(quote! { - #macro_ ! (#punctuated_override_macro_args); - })); - } - result.into() -} - -fn to_punctuated( - elms: Vec<(proc_macro2::TokenStream, bool)>, -) -> proc_macro2::TokenStream { - let mut collection = Punctuated::::new(); - collection.extend(elms.into_iter().map(|(token_stream, _)| token_stream)); - collection.into_token_stream() -} - -fn prepend_cuda_path_to_type(base_path: &Path, type_: Box) -> Box { - match *type_ { - Type::Path(mut type_path) => { - type_path.path = prepend_cuda_path_to_path(base_path, type_path.path); - Box::new(Type::Path(type_path)) - } - Type::Ptr(mut type_ptr) => { - type_ptr.elem = prepend_cuda_path_to_type(base_path, type_ptr.elem); - Box::new(Type::Ptr(type_ptr)) + let mut choose_macro = ChooseMacro::new(input); + let mut cuda_module = syn::parse_str::(CUDA_RS).unwrap(); + syn::visit_mut::visit_file_mut(&mut FixFnSignatures, &mut cuda_module); + let extern_ = if let Item::ForeignMod(extern_) = cuda_module.items.pop().unwrap() { + extern_ + } else { + unreachable!() + }; + let abi = extern_.abi.name; + for mut item in extern_.items { + if let ForeignItem::Fn(ForeignItemFn { + sig: Signature { ref ident, .. }, + ref mut attrs, + .. + }) = item + { + *attrs = Vec::new(); + choose_macro.add(ident, quote! { #abi #item }); + } else { + unreachable!() } - _ => unreachable!(), - } -} - -fn prepend_cuda_path_to_path(base_path: &Path, path: Path) -> Path { - if path.leading_colon.is_some() { - return path; } - if path.segments.len() == 1 { - let ident = path.segments[0].ident.to_string(); - if ident.starts_with("CU") - || ident.starts_with("cu") - || ident.starts_with("GL") - || ident == "HGPUNV" - { - let mut base_path = base_path.clone(); - base_path.segments.extend(path.segments); - return base_path; + let mut result = proc_macro2::TokenStream::new(); + for (path, items) in + iter::once(choose_macro.default).chain(choose_macro.override_sets.into_iter()) + { + if items.is_empty() { + continue; + } + quote! { + #path ! { #(#items)* } } + .to_tokens(&mut result); } - path + result.into() } - struct FnDeclInput { - type_path: Path, normal_macro: Path, - override_macro: Path, - override_fns: Punctuated, + overrides: Punctuated, } impl Parse for FnDeclInput { fn parse(input: ParseStream) -> syn::Result { - let type_path = input.parse::()?; - input.parse::()?; let normal_macro = input.parse::()?; - input.parse::()?; - let override_macro = input.parse::()?; - input.parse::()?; - let override_fns_content; - bracketed!(override_fns_content in input); - let override_fns = override_fns_content.parse_terminated(Ident::parse)?; + let overrides = if input.is_empty() { + Punctuated::new() + } else { + input.parse::()?; + input.parse_terminated(OverrideMacro::parse, Token![,])? 
+ }; Ok(Self { - type_path, normal_macro, - override_macro, - override_fns, + overrides, }) } } +struct OverrideMacro { + macro_: Path, + functions: Punctuated, +} + +impl Parse for OverrideMacro { + fn parse(input: ParseStream) -> syn::Result { + let macro_ = input.parse::()?; + input.parse::()?; + let functions_content; + bracketed!(functions_content in input); + let functions = functions_content.parse_terminated(Ident::parse, Token![,])?; + Ok(Self { macro_, functions }) + } +} -// This trait accepts following parameters: -// * `type_path`: path to the module with type definitions (in the module tree) -// * `trait_`: name of the trait to be derived -// * `ignore_types`: bracketed list of types to ignore -// * `ignore_fns`: bracketed list of fns to ignore -#[proc_macro] -pub fn cuda_derive_display_trait(tokens: TokenStream) -> TokenStream { - let input = parse_macro_input!(tokens as DeriveDisplayInput); - let cuda_module = syn::parse_str::(CUDA_RS).unwrap(); - let mut derive_state = DeriveDisplayState::new(input); - cuda_module - .items - .into_iter() - .filter_map(|i| cuda_derive_display_trait_for_item(&mut derive_state, i)) - .collect::() - .into() +struct ChooseMacro { + default: (Path, Vec), + override_lookup: FxHashMap, + override_sets: FxHashMap>, } -fn cuda_derive_display_trait_for_item( - state: &mut DeriveDisplayState, - item: Item, -) -> Option { - let path_prefix = &state.type_path; - let path_prefix_iter = iter::repeat(&path_prefix); - let trait_ = &state.trait_; - let trait_iter = iter::repeat(&state.trait_); - match item { - Item::Const(_) => None, - Item::ForeignMod(ItemForeignMod { mut items, .. }) => match items.pop().unwrap() { - ForeignItem::Fn(ForeignItemFn { - sig: Signature { ident, inputs, .. }, - .. - }) => { - if state.ignore_fns.contains(&ident) { - return None; - } - let inputs = inputs - .into_iter() - .map(|fn_arg| match fn_arg { - FnArg::Typed(mut pat_type) => { - pat_type.ty = prepend_cuda_path_to_type(path_prefix, pat_type.ty); - FnArg::Typed(pat_type) - } - _ => unreachable!(), - }) - .collect::>(); - let inputs_iter = inputs.iter(); - let mut arg_name_iter = inputs.iter().map(|fn_arg| match fn_arg { - FnArg::Typed(PatType { pat, .. }) => pat, - _ => unreachable!(), - }); - let fn_name = format_ident!("write_{}", ident); - let original_fn_name = ident.to_string(); - Some(match arg_name_iter.next() { - Some(first_arg_name) => quote! { - pub fn #fn_name(writer: &mut (impl std::io::Write + ?Sized), #(#inputs_iter,)*) -> std::io::Result<()> { - writer.write_all(concat!("(", stringify!(#first_arg_name), ": ").as_bytes())?; - let mut arg_idx = 0usize; - CudaDisplay::write(&#first_arg_name, #original_fn_name, arg_idx, writer)?; - #( - writer.write_all(b", ")?; - writer.write_all(concat!(stringify!(#arg_name_iter), ": ").as_bytes())?; - CudaDisplay::write(&#arg_name_iter, #original_fn_name, arg_idx, writer)?; - arg_idx += 1; - )* - writer.write_all(b")") - } - }, - None => quote! 
{ - pub fn #fn_name(writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { - writer.write_all(b"()") - } - }, - }) +impl ChooseMacro { + fn new(input: FnDeclInput) -> Self { + let mut override_lookup = FxHashMap::default(); + let mut override_sets = FxHashMap::default(); + for OverrideMacro { macro_, functions } in input.overrides { + for ident in functions { + override_lookup.insert(ident, macro_.clone()); + override_sets.insert(macro_.clone(), Vec::new()); } - _ => unreachable!(), - }, - Item::Impl(mut item_impl) => { - let enum_ = match *(item_impl.self_ty) { - Type::Path(mut path) => path.path.segments.pop().unwrap().into_value().ident, - _ => unreachable!(), - }; - let variant_ = match item_impl.items.pop().unwrap() { - syn::ImplItem::Const(item_const) => item_const.ident, - _ => unreachable!(), - }; - state.record_enum_variant(enum_, variant_); - None } - Item::Struct(item_struct) => { - let item_struct_name = item_struct.ident.to_string(); - if state.ignore_types.contains(&item_struct.ident) { - return None; - } - if item_struct_name.ends_with("_enum") { - let enum_ = &item_struct.ident; - let enum_iter = iter::repeat(&item_struct.ident); - let variants = state.enums.get(&item_struct.ident).unwrap().iter(); - Some(quote! { - impl #trait_ for #path_prefix :: #enum_ { - fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { - match self { - #(& #path_prefix_iter :: #enum_iter :: #variants => writer.write_all(stringify!(#variants).as_bytes()),)* - _ => write!(writer, "{}", self.0) - } - } - } - }) - } else { - let struct_ = &item_struct.ident; - let (first_field, rest_of_fields) = match item_struct.fields { - Fields::Named(fields) => { - let mut all_idents = fields.named.into_iter().filter_map(|f| { - let f_ident = f.ident.unwrap(); - let name = f_ident.to_string(); - if name.starts_with("reserved") || name == "_unused" { - None - } else { - Some(f_ident) - } - }); - let first = match all_idents.next() { - Some(f) => f, - None => return None, - }; - (first, all_idents) - } - _ => return None, - }; - Some(quote! { - impl #trait_ for #path_prefix :: #struct_ { - fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { - writer.write_all(concat!("{ ", stringify!(#first_field), ": ").as_bytes())?; - #trait_::write(&self.#first_field, "", 0, writer)?; - #( - writer.write_all(concat!(", ", stringify!(#rest_of_fields), ": ").as_bytes())?; - #trait_iter::write(&self.#rest_of_fields, "", 0, writer)?; - )* - writer.write_all(b" }") - } - } - }) - } + Self { + default: (input.normal_macro, Vec::new()), + override_lookup, + override_sets, } - Item::Type(item_type) => { - if state.ignore_types.contains(&item_type.ident) { - return None; - }; - match *(item_type.ty) { - Type::Ptr(_) => { - let type_ = item_type.ident; - Some(quote! { - impl #trait_ for #path_prefix :: #type_ { - fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { - write!(writer, "{:p}", *self) - } - } - }) - } - Type::Path(type_path) => { - if type_path.path.leading_colon.is_some() { - let option_seg = type_path.path.segments.last().unwrap(); - if option_seg.ident == "Option" { - match &option_seg.arguments { - PathArguments::AngleBracketed(generic) => match generic.args[0] { - syn::GenericArgument::Type(Type::BareFn(_)) => { - let type_ = &item_type.ident; - return Some(quote! 
{ - impl #trait_ for #path_prefix :: #type_ { - fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { - write!(writer, "{:p}", unsafe { std::mem::transmute::<#path_prefix :: #type_, *mut ::std::ffi::c_void>(*self) }) - } - } - }); - } - _ => unreachable!(), - }, - _ => unreachable!(), - } - } - } - None - } - _ => unreachable!(), + } + + fn add(&mut self, ident: &Ident, tokens: proc_macro2::TokenStream) { + match self.override_lookup.get(ident) { + Some(override_macro) => { + self.override_sets + .get_mut(override_macro) + .unwrap() + .push(tokens); } + None => self.default.1.push(tokens), } - Item::Union(_) => None, - Item::Use(_) => None, - _ => unreachable!(), } } -struct DeriveDisplayState { - type_path: Path, - trait_: Path, - ignore_types: FxHashSet, - ignore_fns: FxHashSet, - enums: FxHashMap>, -} +// For some reason prettyplease will append trailing comma *only* +// if there are two or more arguments +struct FixFnSignatures; -impl DeriveDisplayState { - fn new(input: DeriveDisplayInput) -> Self { - DeriveDisplayState { - type_path: input.type_path, - trait_: input.trait_, - ignore_types: input.ignore_types.into_iter().collect(), - ignore_fns: input.ignore_fns.into_iter().collect(), - enums: Default::default(), - } +impl VisitMut for FixFnSignatures { + fn visit_signature_mut(&mut self, s: &mut syn::Signature) { + s.inputs.pop_punct(); } +} - fn record_enum_variant(&mut self, enum_: Ident, variant: Ident) { - match self.enums.entry(enum_) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(variant); - } - hash_map::Entry::Vacant(entry) => { - entry.insert(vec![variant]); - } - } +const MODULES: &[&str] = &[ + "context", "device", "driver", "function", "link", "memory", "module", "pointer", +]; + +#[proc_macro] +pub fn cuda_normalize_fn(tokens: TokenStream) -> TokenStream { + let mut path = parse_macro_input!(tokens as syn::Path); + let fn_ = path + .segments + .pop() + .unwrap() + .into_tuple() + .0 + .ident + .to_string(); + let already_has_module = MODULES.contains(&&*path.segments.last().unwrap().ident.to_string()); + let segments: Vec = split(&fn_[2..]); // skip "cu" + let fn_path = join(segments, !already_has_module); + quote! 
{ + #path #fn_path } + .into() } -struct DeriveDisplayInput { - type_path: Path, - trait_: Path, - ignore_types: Punctuated, - ignore_fns: Punctuated, +fn split(fn_: &str) -> Vec { + let mut result = Vec::new(); + for c in fn_.chars() { + if c.is_ascii_uppercase() { + result.push(c.to_ascii_lowercase().to_string()); + } else { + result.last_mut().unwrap().push(c); + } + } + result } -impl Parse for DeriveDisplayInput { - fn parse(input: ParseStream) -> syn::Result { - let type_path = input.parse::()?; - input.parse::()?; - let trait_ = input.parse::()?; - input.parse::()?; - let ignore_types_buffer; - bracketed!(ignore_types_buffer in input); - let ignore_types = ignore_types_buffer.parse_terminated(Ident::parse)?; - input.parse::()?; - let ignore_fns_buffer; - bracketed!(ignore_fns_buffer in input); - let ignore_fns = ignore_fns_buffer.parse_terminated(Ident::parse)?; - Ok(Self { - type_path, - trait_, - ignore_types, - ignore_fns, +fn join(fn_: Vec, find_module: bool) -> Punctuated { + fn full_form(segment: &str) -> Option<&[&str]> { + Some(match segment { + "ctx" => &["context"], + "func" => &["function"], + "mem" => &["memory"], + "memcpy" => &["memory", "copy"], + _ => return None, }) } + let mut normalized: Vec<&str> = Vec::new(); + for segment in fn_.iter() { + match full_form(segment) { + Some(segments) => normalized.extend(segments.into_iter()), + None => normalized.push(&*segment), + } + } + if !find_module { + return [Ident::new(&normalized.join("_"), Span::call_site())] + .into_iter() + .collect(); + } + if !MODULES.contains(&normalized[0]) { + let mut globalized = vec!["driver"]; + globalized.extend(normalized); + normalized = globalized; + } + let (module, path) = normalized.split_first().unwrap(); + let path = path.join("_"); + [module, &&*path] + .into_iter() + .map(|s| Ident::new(s, Span::call_site())) + .collect() } diff --git a/cuda_types/Cargo.toml b/cuda_types/Cargo.toml index e7798303..2ca470f9 100644 --- a/cuda_types/Cargo.toml +++ b/cuda_types/Cargo.toml @@ -6,3 +6,4 @@ edition = "2018" [dependencies] cuda_base = { path = "../cuda_base" } +hip_runtime-sys = { path = "../ext/hip_runtime-sys" } diff --git a/cuda_types/src/lib.rs b/cuda_types/src/lib.rs index 6d24020d..2c2716a0 100644 --- a/cuda_types/src/lib.rs +++ b/cuda_types/src/lib.rs @@ -1,3 +1,8110 @@ -use cuda_base::cuda_type_declarations; - -cuda_type_declarations!(); \ No newline at end of file +// Generated automatically by zluda_bindgen +// DO NOT EDIT MANUALLY +#![allow(warnings)] +pub const CUDA_VERSION: u32 = 12040; +pub const CU_IPC_HANDLE_SIZE: u32 = 64; +pub const CU_COMPUTE_ACCELERATED_TARGET_BASE: u32 = 65536; +pub const CU_GRAPH_COND_ASSIGN_DEFAULT: u32 = 1; +pub const CU_GRAPH_KERNEL_NODE_PORT_DEFAULT: u32 = 0; +pub const CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC: u32 = 1; +pub const CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER: u32 = 2; +pub const CU_MEMHOSTALLOC_PORTABLE: u32 = 1; +pub const CU_MEMHOSTALLOC_DEVICEMAP: u32 = 2; +pub const CU_MEMHOSTALLOC_WRITECOMBINED: u32 = 4; +pub const CU_MEMHOSTREGISTER_PORTABLE: u32 = 1; +pub const CU_MEMHOSTREGISTER_DEVICEMAP: u32 = 2; +pub const CU_MEMHOSTREGISTER_IOMEMORY: u32 = 4; +pub const CU_MEMHOSTREGISTER_READ_ONLY: u32 = 8; +pub const CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL: u32 = 1; +pub const CU_TENSOR_MAP_NUM_QWORDS: u32 = 16; +pub const CUDA_EXTERNAL_MEMORY_DEDICATED: u32 = 1; +pub const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC: u32 = 1; +pub const CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC: u32 = 2; +pub const 
CUDA_NVSCISYNC_ATTR_SIGNAL: u32 = 1; +pub const CUDA_NVSCISYNC_ATTR_WAIT: u32 = 2; +pub const CU_MEM_CREATE_USAGE_TILE_POOL: u32 = 1; +pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC: u32 = 1; +pub const CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC: u32 = 2; +pub const CUDA_ARRAY3D_LAYERED: u32 = 1; +pub const CUDA_ARRAY3D_2DARRAY: u32 = 1; +pub const CUDA_ARRAY3D_SURFACE_LDST: u32 = 2; +pub const CUDA_ARRAY3D_CUBEMAP: u32 = 4; +pub const CUDA_ARRAY3D_TEXTURE_GATHER: u32 = 8; +pub const CUDA_ARRAY3D_DEPTH_TEXTURE: u32 = 16; +pub const CUDA_ARRAY3D_COLOR_ATTACHMENT: u32 = 32; +pub const CUDA_ARRAY3D_SPARSE: u32 = 64; +pub const CUDA_ARRAY3D_DEFERRED_MAPPING: u32 = 128; +pub const CU_TRSA_OVERRIDE_FORMAT: u32 = 1; +pub const CU_TRSF_READ_AS_INTEGER: u32 = 1; +pub const CU_TRSF_NORMALIZED_COORDINATES: u32 = 2; +pub const CU_TRSF_SRGB: u32 = 16; +pub const CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION: u32 = 32; +pub const CU_TRSF_SEAMLESS_CUBEMAP: u32 = 64; +pub const CU_LAUNCH_PARAM_END_AS_INT: u32 = 0; +pub const CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT: u32 = 1; +pub const CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT: u32 = 2; +pub const CU_PARAM_TR_DEFAULT: i32 = -1; +pub const CUDA_EGL_INFINITE_TIMEOUT: u32 = 4294967295; +pub type cuuint32_t = u32; +pub type cuuint64_t = u64; +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdeviceptr_v2(pub *mut ::core::ffi::c_void); +pub type CUdeviceptr = CUdeviceptr_v2; +pub type CUdevice_v1 = ::core::ffi::c_int; +pub type CUdevice = CUdevice_v1; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUctx_st { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUcontext(pub *mut CUctx_st); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUmod_st { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmodule(pub *mut CUmod_st); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUfunc_st { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUfunction(pub *mut CUfunc_st); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUlib_st { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlibrary(pub *mut CUlib_st); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUkern_st { + _unused: [u8; 0], +} +pub type CUkernel = *mut CUkern_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUarray_st { + _unused: [u8; 0], +} +pub type CUarray = *mut CUarray_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUmipmappedArray_st { + _unused: [u8; 0], +} +pub type CUmipmappedArray = *mut CUmipmappedArray_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUtexref_st { + _unused: [u8; 0], +} +pub type CUtexref = *mut CUtexref_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUsurfref_st { + _unused: [u8; 0], +} +pub type CUsurfref = *mut CUsurfref_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUevent_st { + _unused: [u8; 0], +} +pub type CUevent = *mut CUevent_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUstream_st { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstream(pub *mut CUstream_st); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUgraphicsResource_st { + _unused: [u8; 0], +} +pub type CUgraphicsResource = *mut CUgraphicsResource_st; 
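 (Editorial note, not generated output: handles such as CUcontext and CUstream above are `#[repr(transparent)]` newtypes over opaque pointers, so they keep the exact ABI of the wrapped pointer while remaining distinct Rust types that cannot be passed in each other's place. A minimal sketch of the same idea:)

```rust
// Two pointer-sized handle types that the compiler keeps apart.
#[repr(transparent)]
#[derive(Clone, Copy)]
struct Stream(*mut core::ffi::c_void);

#[repr(transparent)]
#[derive(Clone, Copy)]
struct Context(*mut core::ffi::c_void);

fn launch_on(_stream: Stream) {}

fn main() {
    let ctx = Context(core::ptr::null_mut());
    let stream = Stream(core::ptr::null_mut());
    launch_on(stream);
    // launch_on(ctx); // rejected at compile time, even though both types
    //                 // have exactly the layout of a single raw pointer
    let _ = ctx;
}
```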
+pub type CUtexObject_v1 = ::core::ffi::c_ulonglong; +pub type CUtexObject = CUtexObject_v1; +pub type CUsurfObject_v1 = ::core::ffi::c_ulonglong; +pub type CUsurfObject = CUsurfObject_v1; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUextMemory_st { + _unused: [u8; 0], +} +pub type CUexternalMemory = *mut CUextMemory_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUextSemaphore_st { + _unused: [u8; 0], +} +pub type CUexternalSemaphore = *mut CUextSemaphore_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUgraph_st { + _unused: [u8; 0], +} +pub type CUgraph = *mut CUgraph_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUgraphNode_st { + _unused: [u8; 0], +} +pub type CUgraphNode = *mut CUgraphNode_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUgraphExec_st { + _unused: [u8; 0], +} +pub type CUgraphExec = *mut CUgraphExec_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUmemPoolHandle_st { + _unused: [u8; 0], +} +pub type CUmemoryPool = *mut CUmemPoolHandle_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUuserObject_st { + _unused: [u8; 0], +} +pub type CUuserObject = *mut CUuserObject_st; +pub type CUgraphConditionalHandle = cuuint64_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUgraphDeviceUpdatableNode_st { + _unused: [u8; 0], +} +pub type CUgraphDeviceNode = *mut CUgraphDeviceUpdatableNode_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUasyncCallbackEntry_st { + _unused: [u8; 0], +} +pub type CUasyncCallbackHandle = *mut CUasyncCallbackEntry_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUuuid_st { + pub bytes: [::core::ffi::c_uchar; 16usize], +} +pub type CUuuid = CUuuid_st; +/** Fabric handle - An opaque handle representing a memory allocation + that can be exported to processes in same or different nodes. For IPC + between processes on different nodes they must be connected via the + NVSwitch fabric.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemFabricHandle_st { + pub data: [::core::ffi::c_uchar; 64usize], +} +/** Fabric handle - An opaque handle representing a memory allocation + that can be exported to processes in same or different nodes. For IPC + between processes on different nodes they must be connected via the + NVSwitch fabric.*/ +pub type CUmemFabricHandle_v1 = CUmemFabricHandle_st; +/** Fabric handle - An opaque handle representing a memory allocation + that can be exported to processes in same or different nodes. 
For IPC + between processes on different nodes they must be connected via the + NVSwitch fabric.*/ +pub type CUmemFabricHandle = CUmemFabricHandle_v1; +/// CUDA IPC event handle +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUipcEventHandle_st { + pub reserved: [::core::ffi::c_char; 64usize], +} +/// CUDA IPC event handle +pub type CUipcEventHandle_v1 = CUipcEventHandle_st; +/// CUDA IPC event handle +pub type CUipcEventHandle = CUipcEventHandle_v1; +/// CUDA IPC mem handle +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUipcMemHandle_st { + pub reserved: [::core::ffi::c_char; 64usize], +} +/// CUDA IPC mem handle +pub type CUipcMemHandle_v1 = CUipcMemHandle_st; +/// CUDA IPC mem handle +pub type CUipcMemHandle = CUipcMemHandle_v1; +impl CUipcMem_flags_enum { + ///< Automatically enable peer access between remote devices as needed + pub const CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS: CUipcMem_flags_enum = CUipcMem_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// CUDA Ipc Mem Flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUipcMem_flags_enum(pub ::core::ffi::c_uint); +/// CUDA Ipc Mem Flags +pub use self::CUipcMem_flags_enum as CUipcMem_flags; +impl CUmemAttach_flags_enum { + ///< Memory can be accessed by any stream on any device + pub const CU_MEM_ATTACH_GLOBAL: CUmemAttach_flags_enum = CUmemAttach_flags_enum(1); +} +impl CUmemAttach_flags_enum { + ///< Memory cannot be accessed by any stream on any device + pub const CU_MEM_ATTACH_HOST: CUmemAttach_flags_enum = CUmemAttach_flags_enum(2); +} +impl CUmemAttach_flags_enum { + ///< Memory can only be accessed by a single stream on the associated device + pub const CU_MEM_ATTACH_SINGLE: CUmemAttach_flags_enum = CUmemAttach_flags_enum(4); +} +#[repr(transparent)] +/// CUDA Mem Attach Flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAttach_flags_enum(pub ::core::ffi::c_uint); +/// CUDA Mem Attach Flags +pub use self::CUmemAttach_flags_enum as CUmemAttach_flags; +impl CUctx_flags_enum { + ///< Automatic scheduling + pub const CU_CTX_SCHED_AUTO: CUctx_flags_enum = CUctx_flags_enum(0); +} +impl CUctx_flags_enum { + ///< Set spin as default scheduling + pub const CU_CTX_SCHED_SPIN: CUctx_flags_enum = CUctx_flags_enum(1); +} +impl CUctx_flags_enum { + ///< Set yield as default scheduling + pub const CU_CTX_SCHED_YIELD: CUctx_flags_enum = CUctx_flags_enum(2); +} +impl CUctx_flags_enum { + ///< Set blocking synchronization as default scheduling + pub const CU_CTX_SCHED_BLOCKING_SYNC: CUctx_flags_enum = CUctx_flags_enum(4); +} +impl CUctx_flags_enum { + /**< Set blocking synchronization as default scheduling + \deprecated This flag was deprecated as of CUDA 4.0 + and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC.*/ + pub const CU_CTX_BLOCKING_SYNC: CUctx_flags_enum = CUctx_flags_enum(4); +} +impl CUctx_flags_enum { + pub const CU_CTX_SCHED_MASK: CUctx_flags_enum = CUctx_flags_enum(7); +} +impl CUctx_flags_enum { + /**< \deprecated This flag was deprecated as of CUDA 11.0 + and it no longer has any effect. 
All contexts + as of CUDA 3.2 behave as though the flag is enabled.*/ + pub const CU_CTX_MAP_HOST: CUctx_flags_enum = CUctx_flags_enum(8); +} +impl CUctx_flags_enum { + ///< Keep local memory allocation after launch + pub const CU_CTX_LMEM_RESIZE_TO_MAX: CUctx_flags_enum = CUctx_flags_enum(16); +} +impl CUctx_flags_enum { + ///< Trigger coredumps from exceptions in this context + pub const CU_CTX_COREDUMP_ENABLE: CUctx_flags_enum = CUctx_flags_enum(32); +} +impl CUctx_flags_enum { + ///< Enable user pipe to trigger coredumps in this context + pub const CU_CTX_USER_COREDUMP_ENABLE: CUctx_flags_enum = CUctx_flags_enum(64); +} +impl CUctx_flags_enum { + ///< Ensure synchronous memory operations on this context will synchronize + pub const CU_CTX_SYNC_MEMOPS: CUctx_flags_enum = CUctx_flags_enum(128); +} +impl CUctx_flags_enum { + pub const CU_CTX_FLAGS_MASK: CUctx_flags_enum = CUctx_flags_enum(255); +} +#[repr(transparent)] +/// Context creation flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUctx_flags_enum(pub ::core::ffi::c_uint); +/// Context creation flags +pub use self::CUctx_flags_enum as CUctx_flags; +impl CUevent_sched_flags_enum { + ///< Automatic scheduling + pub const CU_EVENT_SCHED_AUTO: CUevent_sched_flags_enum = CUevent_sched_flags_enum( + 0, + ); +} +impl CUevent_sched_flags_enum { + ///< Set spin as default scheduling + pub const CU_EVENT_SCHED_SPIN: CUevent_sched_flags_enum = CUevent_sched_flags_enum( + 1, + ); +} +impl CUevent_sched_flags_enum { + ///< Set yield as default scheduling + pub const CU_EVENT_SCHED_YIELD: CUevent_sched_flags_enum = CUevent_sched_flags_enum( + 2, + ); +} +impl CUevent_sched_flags_enum { + ///< Set blocking synchronization as default scheduling + pub const CU_EVENT_SCHED_BLOCKING_SYNC: CUevent_sched_flags_enum = CUevent_sched_flags_enum( + 4, + ); +} +#[repr(transparent)] +/// Event sched flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUevent_sched_flags_enum(pub ::core::ffi::c_uint); +/// Event sched flags +pub use self::CUevent_sched_flags_enum as CUevent_sched_flags; +impl CUstream_flags_enum { + ///< Default stream flag + pub const CU_STREAM_DEFAULT: CUstream_flags_enum = CUstream_flags_enum(0); +} +impl CUstream_flags_enum { + ///< Stream does not synchronize with stream 0 (the NULL stream) + pub const CU_STREAM_NON_BLOCKING: CUstream_flags_enum = CUstream_flags_enum(1); +} +#[repr(transparent)] +/// Stream creation flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstream_flags_enum(pub ::core::ffi::c_uint); +/// Stream creation flags +pub use self::CUstream_flags_enum as CUstream_flags; +impl CUevent_flags_enum { + ///< Default event flag + pub const CU_EVENT_DEFAULT: CUevent_flags_enum = CUevent_flags_enum(0); +} +impl CUevent_flags_enum { + ///< Event uses blocking synchronization + pub const CU_EVENT_BLOCKING_SYNC: CUevent_flags_enum = CUevent_flags_enum(1); +} +impl CUevent_flags_enum { + ///< Event will not record timing data + pub const CU_EVENT_DISABLE_TIMING: CUevent_flags_enum = CUevent_flags_enum(2); +} +impl CUevent_flags_enum { + ///< Event is suitable for interprocess use. 
CU_EVENT_DISABLE_TIMING must be set + pub const CU_EVENT_INTERPROCESS: CUevent_flags_enum = CUevent_flags_enum(4); +} +#[repr(transparent)] +/// Event creation flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUevent_flags_enum(pub ::core::ffi::c_uint); +/// Event creation flags +pub use self::CUevent_flags_enum as CUevent_flags; +impl CUevent_record_flags_enum { + ///< Default event record flag + pub const CU_EVENT_RECORD_DEFAULT: CUevent_record_flags_enum = CUevent_record_flags_enum( + 0, + ); +} +impl CUevent_record_flags_enum { + /**< When using stream capture, create an event record node + instead of the default behavior. This flag is invalid + when used outside of capture.*/ + pub const CU_EVENT_RECORD_EXTERNAL: CUevent_record_flags_enum = CUevent_record_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Event record flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUevent_record_flags_enum(pub ::core::ffi::c_uint); +/// Event record flags +pub use self::CUevent_record_flags_enum as CUevent_record_flags; +impl CUevent_wait_flags_enum { + ///< Default event wait flag + pub const CU_EVENT_WAIT_DEFAULT: CUevent_wait_flags_enum = CUevent_wait_flags_enum( + 0, + ); +} +impl CUevent_wait_flags_enum { + /**< When using stream capture, create an event wait node + instead of the default behavior. This flag is invalid + when used outside of capture.*/ + pub const CU_EVENT_WAIT_EXTERNAL: CUevent_wait_flags_enum = CUevent_wait_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Event wait flags +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUevent_wait_flags_enum(pub ::core::ffi::c_uint); +/// Event wait flags +pub use self::CUevent_wait_flags_enum as CUevent_wait_flags; +impl CUstreamWaitValue_flags_enum { + /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit +values). Note this is a cyclic comparison which ignores wraparound. +(Default behavior.)*/ + pub const CU_STREAM_WAIT_VALUE_GEQ: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum( + 0, + ); +} +impl CUstreamWaitValue_flags_enum { + ///< Wait until *addr == value. + pub const CU_STREAM_WAIT_VALUE_EQ: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum( + 1, + ); +} +impl CUstreamWaitValue_flags_enum { + ///< Wait until (*addr & value) != 0. + pub const CU_STREAM_WAIT_VALUE_AND: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum( + 2, + ); +} +impl CUstreamWaitValue_flags_enum { + /**< Wait until ~(*addr | value) != 0. Support for this operation can be +queried with ::cuDeviceGetAttribute() and +::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/ + pub const CU_STREAM_WAIT_VALUE_NOR: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum( + 3, + ); +} +impl CUstreamWaitValue_flags_enum { + /**< Follow the wait operation with a flush of outstanding remote writes. This +means that, if a remote write operation is guaranteed to have reached the +device before the wait can be satisfied, that write is guaranteed to be +visible to downstream device work. The device is permitted to reorder +remote writes internally. For example, this flag would be required if +two remote writes arrive in a defined order, the wait is satisfied by the +second write, and downstream work needs to observe the first write. 
+Support for this operation is restricted to selected platforms and can be +queried with ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.*/ + pub const CU_STREAM_WAIT_VALUE_FLUSH: CUstreamWaitValue_flags_enum = CUstreamWaitValue_flags_enum( + 1073741824, + ); +} +#[repr(transparent)] +/// Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64 +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamWaitValue_flags_enum(pub ::core::ffi::c_uint); +/// Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64 +pub use self::CUstreamWaitValue_flags_enum as CUstreamWaitValue_flags; +impl CUstreamWriteValue_flags_enum { + ///< Default behavior + pub const CU_STREAM_WRITE_VALUE_DEFAULT: CUstreamWriteValue_flags_enum = CUstreamWriteValue_flags_enum( + 0, + ); +} +impl CUstreamWriteValue_flags_enum { + /**< Permits the write to be reordered with writes which were issued +before it, as a performance optimization. Normally, +::cuStreamWriteValue32 will provide a memory fence before the +write, which has similar semantics to +__threadfence_system() but is scoped to the stream +rather than a CUDA thread. +This flag is not supported in the v2 API.*/ + pub const CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER: CUstreamWriteValue_flags_enum = CUstreamWriteValue_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flags for ::cuStreamWriteValue32 +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamWriteValue_flags_enum(pub ::core::ffi::c_uint); +/// Flags for ::cuStreamWriteValue32 +pub use self::CUstreamWriteValue_flags_enum as CUstreamWriteValue_flags; +impl CUstreamBatchMemOpType_enum { + ///< Represents a ::cuStreamWaitValue32 operation + pub const CU_STREAM_MEM_OP_WAIT_VALUE_32: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum( + 1, + ); +} +impl CUstreamBatchMemOpType_enum { + ///< Represents a ::cuStreamWriteValue32 operation + pub const CU_STREAM_MEM_OP_WRITE_VALUE_32: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum( + 2, + ); +} +impl CUstreamBatchMemOpType_enum { + ///< Represents a ::cuStreamWaitValue64 operation + pub const CU_STREAM_MEM_OP_WAIT_VALUE_64: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum( + 4, + ); +} +impl CUstreamBatchMemOpType_enum { + ///< Represents a ::cuStreamWriteValue64 operation + pub const CU_STREAM_MEM_OP_WRITE_VALUE_64: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum( + 5, + ); +} +impl CUstreamBatchMemOpType_enum { + ///< Insert a memory barrier of the specified type + pub const CU_STREAM_MEM_OP_BARRIER: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum( + 6, + ); +} +impl CUstreamBatchMemOpType_enum { + /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a +standalone operation.*/ + pub const CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES: CUstreamBatchMemOpType_enum = CUstreamBatchMemOpType_enum( + 3, + ); +} +#[repr(transparent)] +/// Operations for ::cuStreamBatchMemOp +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamBatchMemOpType_enum(pub ::core::ffi::c_uint); +/// Operations for ::cuStreamBatchMemOp +pub use self::CUstreamBatchMemOpType_enum as CUstreamBatchMemOpType; +impl CUstreamMemoryBarrier_flags_enum { + ///< System-wide memory barrier. + pub const CU_STREAM_MEMORY_BARRIER_TYPE_SYS: CUstreamMemoryBarrier_flags_enum = CUstreamMemoryBarrier_flags_enum( + 0, + ); +} +impl CUstreamMemoryBarrier_flags_enum { + ///< Limit memory barrier scope to the GPU. 
+ pub const CU_STREAM_MEMORY_BARRIER_TYPE_GPU: CUstreamMemoryBarrier_flags_enum = CUstreamMemoryBarrier_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flags for ::cuStreamMemoryBarrier +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamMemoryBarrier_flags_enum(pub ::core::ffi::c_uint); +/// Flags for ::cuStreamMemoryBarrier +pub use self::CUstreamMemoryBarrier_flags_enum as CUstreamMemoryBarrier_flags; +/// Per-operation parameters for ::cuStreamBatchMemOp +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUstreamBatchMemOpParams_union { + pub operation: CUstreamBatchMemOpType, + pub waitValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st, + pub writeValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st, + pub flushRemoteWrites: CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st, + pub memoryBarrier: CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st, + pub pad: [cuuint64_t; 6usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st { + pub operation: CUstreamBatchMemOpType, + pub address: CUdeviceptr, + pub __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1, + pub flags: ::core::ffi::c_uint, + ///< For driver internal use. Initial value is unimportant. + pub alias: CUdeviceptr, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 { + pub value: cuuint32_t, + pub value64: cuuint64_t, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st { + pub operation: CUstreamBatchMemOpType, + pub address: CUdeviceptr, + pub __bindgen_anon_1: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1, + pub flags: ::core::ffi::c_uint, + ///< For driver internal use. Initial value is unimportant. + pub alias: CUdeviceptr, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 { + pub value: cuuint32_t, + pub value64: cuuint64_t, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st { + pub operation: CUstreamBatchMemOpType, + pub flags: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st { + pub operation: CUstreamBatchMemOpType, + pub flags: ::core::ffi::c_uint, +} +/// Per-operation parameters for ::cuStreamBatchMemOp +pub type CUstreamBatchMemOpParams_v1 = CUstreamBatchMemOpParams_union; +/// Per-operation parameters for ::cuStreamBatchMemOp +pub type CUstreamBatchMemOpParams = CUstreamBatchMemOpParams_v1; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st { + pub ctx: CUcontext, + pub count: ::core::ffi::c_uint, + pub paramArray: *mut CUstreamBatchMemOpParams, + pub flags: ::core::ffi::c_uint, +} +pub type CUDA_BATCH_MEM_OP_NODE_PARAMS_v1 = CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st; +pub type CUDA_BATCH_MEM_OP_NODE_PARAMS = CUDA_BATCH_MEM_OP_NODE_PARAMS_v1; +/// Batch memory operation node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st { + ///< Context to use for the operations. + pub ctx: CUcontext, + ///< Number of operations in paramArray. 
+ pub count: ::core::ffi::c_uint, + ///< Array of batch memory operations. + pub paramArray: *mut CUstreamBatchMemOpParams, + ///< Flags to control the node. + pub flags: ::core::ffi::c_uint, +} +/// Batch memory operation node parameters +pub type CUDA_BATCH_MEM_OP_NODE_PARAMS_v2 = CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st; +impl CUoccupancy_flags_enum { + ///< Default behavior + pub const CU_OCCUPANCY_DEFAULT: CUoccupancy_flags_enum = CUoccupancy_flags_enum(0); +} +impl CUoccupancy_flags_enum { + ///< Assume global caching is enabled and cannot be automatically turned off + pub const CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE: CUoccupancy_flags_enum = CUoccupancy_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Occupancy calculator flag +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUoccupancy_flags_enum(pub ::core::ffi::c_uint); +/// Occupancy calculator flag +pub use self::CUoccupancy_flags_enum as CUoccupancy_flags; +impl CUstreamUpdateCaptureDependencies_flags_enum { + ///< Add new nodes to the dependency set + pub const CU_STREAM_ADD_CAPTURE_DEPENDENCIES: CUstreamUpdateCaptureDependencies_flags_enum = CUstreamUpdateCaptureDependencies_flags_enum( + 0, + ); +} +impl CUstreamUpdateCaptureDependencies_flags_enum { + ///< Replace the dependency set with the new nodes + pub const CU_STREAM_SET_CAPTURE_DEPENDENCIES: CUstreamUpdateCaptureDependencies_flags_enum = CUstreamUpdateCaptureDependencies_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flags for ::cuStreamUpdateCaptureDependencies +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamUpdateCaptureDependencies_flags_enum(pub ::core::ffi::c_uint); +/// Flags for ::cuStreamUpdateCaptureDependencies +pub use self::CUstreamUpdateCaptureDependencies_flags_enum as CUstreamUpdateCaptureDependencies_flags; +impl CUasyncNotificationType_enum { + pub const CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET: CUasyncNotificationType_enum = CUasyncNotificationType_enum( + 1, + ); +} +#[repr(transparent)] +/// Types of async notification that can be sent +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUasyncNotificationType_enum(pub ::core::ffi::c_uint); +/// Types of async notification that can be sent +pub use self::CUasyncNotificationType_enum as CUasyncNotificationType; +/// Information passed to the user via the async notification callback +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUasyncNotificationInfo_st { + pub type_: CUasyncNotificationType, + pub info: CUasyncNotificationInfo_st__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUasyncNotificationInfo_st__bindgen_ty_1 { + pub overBudget: CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 { + pub bytesOverBudget: ::core::ffi::c_ulonglong, +} +/// Information passed to the user via the async notification callback +pub type CUasyncNotificationInfo = CUasyncNotificationInfo_st; +/** CUDA async notification callback + \param info Information describing what actions to take as a result of this trim notification. + \param userData Pointer to user defined data provided at registration. 
+ \param callback The callback handle associated with this specific callback.*/ +pub type CUasyncCallback = ::core::option::Option< + unsafe extern "system" fn( + info: *mut CUasyncNotificationInfo, + userData: *mut ::core::ffi::c_void, + callback: CUasyncCallbackHandle, + ), +>; +impl CUarray_format_enum { + ///< Unsigned 8-bit integers + pub const CU_AD_FORMAT_UNSIGNED_INT8: CUarray_format_enum = CUarray_format_enum(1); +} +impl CUarray_format_enum { + ///< Unsigned 16-bit integers + pub const CU_AD_FORMAT_UNSIGNED_INT16: CUarray_format_enum = CUarray_format_enum(2); +} +impl CUarray_format_enum { + ///< Unsigned 32-bit integers + pub const CU_AD_FORMAT_UNSIGNED_INT32: CUarray_format_enum = CUarray_format_enum(3); +} +impl CUarray_format_enum { + ///< Signed 8-bit integers + pub const CU_AD_FORMAT_SIGNED_INT8: CUarray_format_enum = CUarray_format_enum(8); +} +impl CUarray_format_enum { + ///< Signed 16-bit integers + pub const CU_AD_FORMAT_SIGNED_INT16: CUarray_format_enum = CUarray_format_enum(9); +} +impl CUarray_format_enum { + ///< Signed 32-bit integers + pub const CU_AD_FORMAT_SIGNED_INT32: CUarray_format_enum = CUarray_format_enum(10); +} +impl CUarray_format_enum { + ///< 16-bit floating point + pub const CU_AD_FORMAT_HALF: CUarray_format_enum = CUarray_format_enum(16); +} +impl CUarray_format_enum { + ///< 32-bit floating point + pub const CU_AD_FORMAT_FLOAT: CUarray_format_enum = CUarray_format_enum(32); +} +impl CUarray_format_enum { + ///< 8-bit YUV planar format, with 4:2:0 sampling + pub const CU_AD_FORMAT_NV12: CUarray_format_enum = CUarray_format_enum(176); +} +impl CUarray_format_enum { + ///< 1 channel unsigned 8-bit normalized integer + pub const CU_AD_FORMAT_UNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(192); +} +impl CUarray_format_enum { + ///< 2 channel unsigned 8-bit normalized integer + pub const CU_AD_FORMAT_UNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(193); +} +impl CUarray_format_enum { + ///< 4 channel unsigned 8-bit normalized integer + pub const CU_AD_FORMAT_UNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(194); +} +impl CUarray_format_enum { + ///< 1 channel unsigned 16-bit normalized integer + pub const CU_AD_FORMAT_UNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(195); +} +impl CUarray_format_enum { + ///< 2 channel unsigned 16-bit normalized integer + pub const CU_AD_FORMAT_UNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(196); +} +impl CUarray_format_enum { + ///< 4 channel unsigned 16-bit normalized integer + pub const CU_AD_FORMAT_UNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(197); +} +impl CUarray_format_enum { + ///< 1 channel signed 8-bit normalized integer + pub const CU_AD_FORMAT_SNORM_INT8X1: CUarray_format_enum = CUarray_format_enum(198); +} +impl CUarray_format_enum { + ///< 2 channel signed 8-bit normalized integer + pub const CU_AD_FORMAT_SNORM_INT8X2: CUarray_format_enum = CUarray_format_enum(199); +} +impl CUarray_format_enum { + ///< 4 channel signed 8-bit normalized integer + pub const CU_AD_FORMAT_SNORM_INT8X4: CUarray_format_enum = CUarray_format_enum(200); +} +impl CUarray_format_enum { + ///< 1 channel signed 16-bit normalized integer + pub const CU_AD_FORMAT_SNORM_INT16X1: CUarray_format_enum = CUarray_format_enum(201); +} +impl CUarray_format_enum { + ///< 2 channel signed 16-bit normalized integer + pub const CU_AD_FORMAT_SNORM_INT16X2: CUarray_format_enum = CUarray_format_enum(202); +} +impl CUarray_format_enum { + ///< 4 channel signed 16-bit normalized integer + pub const 
CU_AD_FORMAT_SNORM_INT16X4: CUarray_format_enum = CUarray_format_enum(203); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC1 compression) format + pub const CU_AD_FORMAT_BC1_UNORM: CUarray_format_enum = CUarray_format_enum(145); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding + pub const CU_AD_FORMAT_BC1_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum( + 146, + ); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC2 compression) format + pub const CU_AD_FORMAT_BC2_UNORM: CUarray_format_enum = CUarray_format_enum(147); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding + pub const CU_AD_FORMAT_BC2_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum( + 148, + ); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC3 compression) format + pub const CU_AD_FORMAT_BC3_UNORM: CUarray_format_enum = CUarray_format_enum(149); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding + pub const CU_AD_FORMAT_BC3_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum( + 150, + ); +} +impl CUarray_format_enum { + ///< 1 channel unsigned normalized block-compressed (BC4 compression) format + pub const CU_AD_FORMAT_BC4_UNORM: CUarray_format_enum = CUarray_format_enum(151); +} +impl CUarray_format_enum { + ///< 1 channel signed normalized block-compressed (BC4 compression) format + pub const CU_AD_FORMAT_BC4_SNORM: CUarray_format_enum = CUarray_format_enum(152); +} +impl CUarray_format_enum { + ///< 2 channel unsigned normalized block-compressed (BC5 compression) format + pub const CU_AD_FORMAT_BC5_UNORM: CUarray_format_enum = CUarray_format_enum(153); +} +impl CUarray_format_enum { + ///< 2 channel signed normalized block-compressed (BC5 compression) format + pub const CU_AD_FORMAT_BC5_SNORM: CUarray_format_enum = CUarray_format_enum(154); +} +impl CUarray_format_enum { + ///< 3 channel unsigned half-float block-compressed (BC6H compression) format + pub const CU_AD_FORMAT_BC6H_UF16: CUarray_format_enum = CUarray_format_enum(155); +} +impl CUarray_format_enum { + ///< 3 channel signed half-float block-compressed (BC6H compression) format + pub const CU_AD_FORMAT_BC6H_SF16: CUarray_format_enum = CUarray_format_enum(156); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC7 compression) format + pub const CU_AD_FORMAT_BC7_UNORM: CUarray_format_enum = CUarray_format_enum(157); +} +impl CUarray_format_enum { + ///< 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding + pub const CU_AD_FORMAT_BC7_UNORM_SRGB: CUarray_format_enum = CUarray_format_enum( + 158, + ); +} +#[repr(transparent)] +/// Array formats +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUarray_format_enum(pub ::core::ffi::c_uint); +/// Array formats +pub use self::CUarray_format_enum as CUarray_format; +impl CUaddress_mode_enum { + ///< Wrapping address mode + pub const CU_TR_ADDRESS_MODE_WRAP: CUaddress_mode_enum = CUaddress_mode_enum(0); +} +impl CUaddress_mode_enum { + ///< Clamp to edge address mode + pub const CU_TR_ADDRESS_MODE_CLAMP: CUaddress_mode_enum = CUaddress_mode_enum(1); +} +impl CUaddress_mode_enum { + ///< Mirror address mode + pub const CU_TR_ADDRESS_MODE_MIRROR: CUaddress_mode_enum = 
CUaddress_mode_enum(2); +} +impl CUaddress_mode_enum { + ///< Border address mode + pub const CU_TR_ADDRESS_MODE_BORDER: CUaddress_mode_enum = CUaddress_mode_enum(3); +} +#[repr(transparent)] +/// Texture reference addressing modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUaddress_mode_enum(pub ::core::ffi::c_uint); +/// Texture reference addressing modes +pub use self::CUaddress_mode_enum as CUaddress_mode; +impl CUfilter_mode_enum { + ///< Point filter mode + pub const CU_TR_FILTER_MODE_POINT: CUfilter_mode_enum = CUfilter_mode_enum(0); +} +impl CUfilter_mode_enum { + ///< Linear filter mode + pub const CU_TR_FILTER_MODE_LINEAR: CUfilter_mode_enum = CUfilter_mode_enum(1); +} +#[repr(transparent)] +/// Texture reference filtering modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUfilter_mode_enum(pub ::core::ffi::c_uint); +/// Texture reference filtering modes +pub use self::CUfilter_mode_enum as CUfilter_mode; +impl CUdevice_attribute_enum { + ///< Maximum number of threads per block + pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum( + 1, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum block dimension X + pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: CUdevice_attribute_enum = CUdevice_attribute_enum( + 2, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum block dimension Y + pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: CUdevice_attribute_enum = CUdevice_attribute_enum( + 3, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum block dimension Z + pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: CUdevice_attribute_enum = CUdevice_attribute_enum( + 4, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum grid dimension X + pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: CUdevice_attribute_enum = CUdevice_attribute_enum( + 5, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum grid dimension Y + pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: CUdevice_attribute_enum = CUdevice_attribute_enum( + 6, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum grid dimension Z + pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: CUdevice_attribute_enum = CUdevice_attribute_enum( + 7, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum shared memory available per block in bytes + pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum( + 8, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK + pub const CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum( + 8, + ); +} +impl CUdevice_attribute_enum { + ///< Memory available on device for __constant__ variables in a CUDA C kernel in bytes + pub const CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum( + 9, + ); +} +impl CUdevice_attribute_enum { + ///< Warp size in threads + pub const CU_DEVICE_ATTRIBUTE_WARP_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 10, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum pitch in bytes allowed by memory copies + pub const CU_DEVICE_ATTRIBUTE_MAX_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 11, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum number of 32-bit registers available per block + pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum( + 12, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, use 
CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK + pub const CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum( + 12, + ); +} +impl CUdevice_attribute_enum { + ///< Typical clock frequency in kilohertz + pub const CU_DEVICE_ATTRIBUTE_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 13, + ); +} +impl CUdevice_attribute_enum { + ///< Alignment requirement for textures + pub const CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 14, + ); +} +impl CUdevice_attribute_enum { + ///< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. + pub const CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: CUdevice_attribute_enum = CUdevice_attribute_enum( + 15, + ); +} +impl CUdevice_attribute_enum { + ///< Number of multiprocessors on device + pub const CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 16, + ); +} +impl CUdevice_attribute_enum { + ///< Specifies whether there is a run time limit on kernels + pub const CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 17, + ); +} +impl CUdevice_attribute_enum { + ///< Device is integrated with host memory + pub const CU_DEVICE_ATTRIBUTE_INTEGRATED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 18, + ); +} +impl CUdevice_attribute_enum { + ///< Device can map host memory into CUDA address space + pub const CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum( + 19, + ); +} +impl CUdevice_attribute_enum { + ///< Compute mode (See ::CUcomputemode for details) + pub const CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 20, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 1D texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 21, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 22, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D texture height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 23, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 3D texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 24, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 3D texture height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 25, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 3D texture depth + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 26, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D layered texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 27, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D layered texture height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 28, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum layers in a 2D layered texture + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 29, + ); +} +impl 
CUdevice_attribute_enum { + ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 27, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 28, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES: CUdevice_attribute_enum = CUdevice_attribute_enum( + 29, + ); +} +impl CUdevice_attribute_enum { + ///< Alignment requirement for surfaces + pub const CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 30, + ); +} +impl CUdevice_attribute_enum { + ///< Device can possibly execute multiple kernels concurrently + pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 31, + ); +} +impl CUdevice_attribute_enum { + ///< Device has ECC support enabled + pub const CU_DEVICE_ATTRIBUTE_ECC_ENABLED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 32, + ); +} +impl CUdevice_attribute_enum { + ///< PCI bus ID of the device + pub const CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: CUdevice_attribute_enum = CUdevice_attribute_enum( + 33, + ); +} +impl CUdevice_attribute_enum { + ///< PCI device ID of the device + pub const CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: CUdevice_attribute_enum = CUdevice_attribute_enum( + 34, + ); +} +impl CUdevice_attribute_enum { + ///< Device is using TCC driver model + pub const CU_DEVICE_ATTRIBUTE_TCC_DRIVER: CUdevice_attribute_enum = CUdevice_attribute_enum( + 35, + ); +} +impl CUdevice_attribute_enum { + ///< Peak memory clock frequency in kilohertz + pub const CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 36, + ); +} +impl CUdevice_attribute_enum { + ///< Global memory bus width in bits + pub const CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 37, + ); +} +impl CUdevice_attribute_enum { + ///< Size of L2 cache in bytes + pub const CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 38, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum resident threads per multiprocessor + pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 39, + ); +} +impl CUdevice_attribute_enum { + ///< Number of asynchronous engines + pub const CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 40, + ); +} +impl CUdevice_attribute_enum { + ///< Device shares a unified address space with the host + pub const CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: CUdevice_attribute_enum = CUdevice_attribute_enum( + 41, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 1D layered texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 42, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum layers in a 1D layered texture + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 43, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, do not use. 
+ pub const CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER: CUdevice_attribute_enum = CUdevice_attribute_enum( + 44, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 45, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 46, + ); +} +impl CUdevice_attribute_enum { + ///< Alternate maximum 3D texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 47, + ); +} +impl CUdevice_attribute_enum { + ///< Alternate maximum 3D texture height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 48, + ); +} +impl CUdevice_attribute_enum { + ///< Alternate maximum 3D texture depth + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 49, + ); +} +impl CUdevice_attribute_enum { + ///< PCI domain ID of the device + pub const CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: CUdevice_attribute_enum = CUdevice_attribute_enum( + 50, + ); +} +impl CUdevice_attribute_enum { + ///< Pitch alignment requirement for textures + pub const CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 51, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum cubemap texture width/height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 52, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum cubemap layered texture width/height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 53, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum layers in a cubemap layered texture + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 54, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 1D surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 55, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 56, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D surface height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 57, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 3D surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 58, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 3D surface height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 59, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 3D surface depth + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 60, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 1D layered surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 61, + ); +} +impl 
CUdevice_attribute_enum { + ///< Maximum layers in a 1D layered surface + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 62, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D layered surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 63, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D layered surface height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 64, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum layers in a 2D layered surface + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 65, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum cubemap surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 66, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum cubemap layered surface width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 67, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum layers in a cubemap layered surface + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 68, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead. + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 69, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D linear texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 70, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D linear texture height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 71, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum 2D linear texture pitch in bytes + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 72, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum mipmapped 2D texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 73, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum mipmapped 2D texture height + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 74, + ); +} +impl CUdevice_attribute_enum { + ///< Major compute capability version number + pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 75, + ); +} +impl CUdevice_attribute_enum { + ///< Minor compute capability version number + pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 76, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum mipmapped 1D texture width + pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 77, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports stream priorities + pub const CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 78, 
+ ); +} +impl CUdevice_attribute_enum { + ///< Device supports caching globals in L1 + pub const CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 79, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports caching locals in L1 + pub const CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 80, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum shared memory available per multiprocessor in bytes + pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 81, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum number of 32-bit registers available per multiprocessor + pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 82, + ); +} +impl CUdevice_attribute_enum { + ///< Device can allocate managed memory on this system + pub const CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: CUdevice_attribute_enum = CUdevice_attribute_enum( + 83, + ); +} +impl CUdevice_attribute_enum { + ///< Device is on a multi-GPU board + pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: CUdevice_attribute_enum = CUdevice_attribute_enum( + 84, + ); +} +impl CUdevice_attribute_enum { + ///< Unique id for a group of devices on the same multi-GPU board + pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: CUdevice_attribute_enum = CUdevice_attribute_enum( + 85, + ); +} +impl CUdevice_attribute_enum { + ///< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware) + pub const CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 86, + ); +} +impl CUdevice_attribute_enum { + ///< Ratio of single precision performance (in floating-point operations per second) to double precision performance + pub const CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: CUdevice_attribute_enum = CUdevice_attribute_enum( + 87, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports coherently accessing pageable memory without calling cudaHostRegister on it + pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 88, + ); +} +impl CUdevice_attribute_enum { + ///< Device can coherently access managed memory concurrently with the CPU + pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 89, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports compute preemption. + pub const CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 90, + ); +} +impl CUdevice_attribute_enum { + ///< Device can access host registered memory at the same virtual address as the CPU + pub const CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: CUdevice_attribute_enum = CUdevice_attribute_enum( + 91, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, along with v1 MemOps API, ::cuStreamBatchMemOp and related APIs are supported. + pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1: CUdevice_attribute_enum = CUdevice_attribute_enum( + 92, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, along with v1 MemOps API, 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs. 
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1: CUdevice_attribute_enum = CUdevice_attribute_enum( + 93, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, along with v1 MemOps API, ::CU_STREAM_WAIT_VALUE_NOR is supported. + pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1: CUdevice_attribute_enum = CUdevice_attribute_enum( + 94, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel + pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 95, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated. + pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 96, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum optin shared memory per block + pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: CUdevice_attribute_enum = CUdevice_attribute_enum( + 97, + ); +} +impl CUdevice_attribute_enum { + ///< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details. + pub const CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES: CUdevice_attribute_enum = CUdevice_attribute_enum( + 98, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports host memory registration via ::cudaHostRegister. + pub const CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 99, + ); +} +impl CUdevice_attribute_enum { + ///< Device accesses pageable memory via the host's page tables. + pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: CUdevice_attribute_enum = CUdevice_attribute_enum( + 100, + ); +} +impl CUdevice_attribute_enum { + ///< The host can directly access managed memory on the device without migration. 
+ pub const CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: CUdevice_attribute_enum = CUdevice_attribute_enum( + 101, + ); +} +impl CUdevice_attribute_enum { + ///< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED + pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 102, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs + pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 102, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 103, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 104, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 105, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum number of blocks per multiprocessor + pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 106, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports compression of memory + pub const CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 107, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum L2 persisting lines capacity setting in bytes. + pub const CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 108, + ); +} +impl CUdevice_attribute_enum { + ///< Maximum value of CUaccessPolicyWindow::num_bytes. 
+ pub const CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum( + 109, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate + pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 110, + ); +} +impl CUdevice_attribute_enum { + ///< Shared memory reserved by CUDA driver per block in bytes + pub const CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = CUdevice_attribute_enum( + 111, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays + pub const CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 112, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTERGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU + pub const CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 113, + ); +} +impl CUdevice_attribute_enum { + ///< External timeline semaphore interop is supported on the device + pub const CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 114, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs + pub const CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 115, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 116, + ); +} +impl CUdevice_attribute_enum { + ///< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum + pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 117, + ); +} +impl CUdevice_attribute_enum { + ///< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. + pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: CUdevice_attribute_enum = CUdevice_attribute_enum( + 118, + ); +} +impl CUdevice_attribute_enum { + ///< Handle types supported with mempool based IPC + pub const CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: CUdevice_attribute_enum = CUdevice_attribute_enum( + 119, + ); +} +impl CUdevice_attribute_enum { + ///< Indicates device supports cluster launch + pub const CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH: CUdevice_attribute_enum = CUdevice_attribute_enum( + 120, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays + pub const CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 121, + ); +} +impl CUdevice_attribute_enum { + ///< 64-bit operations are supported in ::cuStreamBatchMemOp and related MemOp APIs. 
+ pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 122, + ); +} +impl CUdevice_attribute_enum { + ///< ::CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs. + pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR: CUdevice_attribute_enum = CUdevice_attribute_enum( + 123, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports buffer sharing with dma_buf mechanism. + pub const CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 124, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports IPC Events. + pub const CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 125, + ); +} +impl CUdevice_attribute_enum { + ///< Number of memory domains the device supports. + pub const CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT: CUdevice_attribute_enum = CUdevice_attribute_enum( + 126, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports accessing memory using Tensor Map. + pub const CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 127, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports exporting memory to a fabric handle with cuMemExportToShareableHandle() or requested with cuMemCreate() + pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 128, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports unified function pointers. + pub const CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS: CUdevice_attribute_enum = CUdevice_attribute_enum( + 129, + ); +} +impl CUdevice_attribute_enum { + pub const CU_DEVICE_ATTRIBUTE_NUMA_CONFIG: CUdevice_attribute_enum = CUdevice_attribute_enum( + 130, + ); +} +impl CUdevice_attribute_enum { + pub const CU_DEVICE_ATTRIBUTE_NUMA_ID: CUdevice_attribute_enum = CUdevice_attribute_enum( + 131, + ); +} +impl CUdevice_attribute_enum { + ///< Device supports switch multicast and reduction operations. + pub const CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 132, + ); +} +impl CUdevice_attribute_enum { + ///< Indicates if contexts created on this device will be shared via MPS + pub const CU_DEVICE_ATTRIBUTE_MPS_ENABLED: CUdevice_attribute_enum = CUdevice_attribute_enum( + 133, + ); +} +impl CUdevice_attribute_enum { + ///< NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA. 
+ pub const CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID: CUdevice_attribute_enum = CUdevice_attribute_enum( + 134, + ); +} +impl CUdevice_attribute_enum { + pub const CU_DEVICE_ATTRIBUTE_MAX: CUdevice_attribute_enum = CUdevice_attribute_enum( + 135, + ); +} +#[repr(transparent)] +/// Device properties +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdevice_attribute_enum(pub ::core::ffi::c_uint); +/// Device properties +pub use self::CUdevice_attribute_enum as CUdevice_attribute; +/// Legacy device properties +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdevprop_st { + ///< Maximum number of threads per block + pub maxThreadsPerBlock: ::core::ffi::c_int, + ///< Maximum size of each dimension of a block + pub maxThreadsDim: [::core::ffi::c_int; 3usize], + ///< Maximum size of each dimension of a grid + pub maxGridSize: [::core::ffi::c_int; 3usize], + ///< Shared memory available per block in bytes + pub sharedMemPerBlock: ::core::ffi::c_int, + ///< Constant memory available on device in bytes + pub totalConstantMemory: ::core::ffi::c_int, + ///< Warp size in threads + pub SIMDWidth: ::core::ffi::c_int, + ///< Maximum pitch in bytes allowed by memory copies + pub memPitch: ::core::ffi::c_int, + ///< 32-bit registers available per block + pub regsPerBlock: ::core::ffi::c_int, + ///< Clock frequency in kilohertz + pub clockRate: ::core::ffi::c_int, + ///< Alignment requirement for textures + pub textureAlign: ::core::ffi::c_int, +} +/// Legacy device properties +pub type CUdevprop_v1 = CUdevprop_st; +/// Legacy device properties +pub type CUdevprop = CUdevprop_v1; +impl CUpointer_attribute_enum { + ///< The ::CUcontext on which a pointer was allocated or registered + pub const CU_POINTER_ATTRIBUTE_CONTEXT: CUpointer_attribute_enum = CUpointer_attribute_enum( + 1, + ); +} +impl CUpointer_attribute_enum { + ///< The ::CUmemorytype describing the physical location of a pointer + pub const CU_POINTER_ATTRIBUTE_MEMORY_TYPE: CUpointer_attribute_enum = CUpointer_attribute_enum( + 2, + ); +} +impl CUpointer_attribute_enum { + ///< The address at which a pointer's memory may be accessed on the device + pub const CU_POINTER_ATTRIBUTE_DEVICE_POINTER: CUpointer_attribute_enum = CUpointer_attribute_enum( + 3, + ); +} +impl CUpointer_attribute_enum { + ///< The address at which a pointer's memory may be accessed on the host + pub const CU_POINTER_ATTRIBUTE_HOST_POINTER: CUpointer_attribute_enum = CUpointer_attribute_enum( + 4, + ); +} +impl CUpointer_attribute_enum { + ///< A pair of tokens for use with the nv-p2p.h Linux kernel interface + pub const CU_POINTER_ATTRIBUTE_P2P_TOKENS: CUpointer_attribute_enum = CUpointer_attribute_enum( + 5, + ); +} +impl CUpointer_attribute_enum { + ///< Synchronize every synchronous memory operation initiated on this region + pub const CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: CUpointer_attribute_enum = CUpointer_attribute_enum( + 6, + ); +} +impl CUpointer_attribute_enum { + ///< A process-wide unique ID for an allocated memory region + pub const CU_POINTER_ATTRIBUTE_BUFFER_ID: CUpointer_attribute_enum = CUpointer_attribute_enum( + 7, + ); +} +impl CUpointer_attribute_enum { + ///< Indicates if the pointer points to managed memory + pub const CU_POINTER_ATTRIBUTE_IS_MANAGED: CUpointer_attribute_enum = CUpointer_attribute_enum( + 8, + ); +} +impl CUpointer_attribute_enum { + ///< A device ordinal of a device on which a pointer was allocated or registered + pub const CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: CUpointer_attribute_enum = 
CUpointer_attribute_enum( + 9, + ); +} +impl CUpointer_attribute_enum { + ///< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise + pub const CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: CUpointer_attribute_enum = CUpointer_attribute_enum( + 10, + ); +} +impl CUpointer_attribute_enum { + ///< Starting address for this requested pointer + pub const CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: CUpointer_attribute_enum = CUpointer_attribute_enum( + 11, + ); +} +impl CUpointer_attribute_enum { + ///< Size of the address range for this requested pointer + pub const CU_POINTER_ATTRIBUTE_RANGE_SIZE: CUpointer_attribute_enum = CUpointer_attribute_enum( + 12, + ); +} +impl CUpointer_attribute_enum { + ///< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise + pub const CU_POINTER_ATTRIBUTE_MAPPED: CUpointer_attribute_enum = CUpointer_attribute_enum( + 13, + ); +} +impl CUpointer_attribute_enum { + ///< Bitmask of allowed ::CUmemAllocationHandleType for this allocation + pub const CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: CUpointer_attribute_enum = CUpointer_attribute_enum( + 14, + ); +} +impl CUpointer_attribute_enum { + ///< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API + pub const CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE: CUpointer_attribute_enum = CUpointer_attribute_enum( + 15, + ); +} +impl CUpointer_attribute_enum { + ///< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given + pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAGS: CUpointer_attribute_enum = CUpointer_attribute_enum( + 16, + ); +} +impl CUpointer_attribute_enum { + ///< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. + pub const CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: CUpointer_attribute_enum = CUpointer_attribute_enum( + 17, + ); +} +impl CUpointer_attribute_enum { + ///< Size of the actual underlying mapping that the pointer belongs to + pub const CU_POINTER_ATTRIBUTE_MAPPING_SIZE: CUpointer_attribute_enum = CUpointer_attribute_enum( + 18, + ); +} +impl CUpointer_attribute_enum { + ///< The start address of the mapping that the pointer belongs to + pub const CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR: CUpointer_attribute_enum = CUpointer_attribute_enum( + 19, + ); +} +impl CUpointer_attribute_enum { + ///< A process-wide unique id corresponding to the physical allocation the pointer belongs to + pub const CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID: CUpointer_attribute_enum = CUpointer_attribute_enum( + 20, + ); +} +#[repr(transparent)] +/// Pointer information +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUpointer_attribute_enum(pub ::core::ffi::c_uint); +/// Pointer information +pub use self::CUpointer_attribute_enum as CUpointer_attribute; +impl CUfunction_attribute_enum { + /** The maximum number of threads per block, beyond which a launch of the + function would fail. This number depends on both the function and the + device on which the function is currently loaded.*/ + pub const CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUfunction_attribute_enum = CUfunction_attribute_enum( + 0, + ); +} +impl CUfunction_attribute_enum { + /** The size in bytes of statically-allocated shared memory required by + this function. 
This does not include dynamically-allocated shared + memory requested by the user at runtime.*/ + pub const CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum( + 1, + ); +} +impl CUfunction_attribute_enum { + /** The size in bytes of user-allocated constant memory required by this + function.*/ + pub const CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum( + 2, + ); +} +impl CUfunction_attribute_enum { + /// The size in bytes of local memory used by each thread of this function. + pub const CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum( + 3, + ); +} +impl CUfunction_attribute_enum { + /// The number of registers used by each thread of this function. + pub const CU_FUNC_ATTRIBUTE_NUM_REGS: CUfunction_attribute_enum = CUfunction_attribute_enum( + 4, + ); +} +impl CUfunction_attribute_enum { + /** The PTX virtual architecture version for which the function was + compiled. This value is the major PTX version * 10 + the minor PTX + version, so a PTX version 1.3 function would return the value 13. + Note that this may return the undefined value of 0 for cubins + compiled prior to CUDA 3.0.*/ + pub const CU_FUNC_ATTRIBUTE_PTX_VERSION: CUfunction_attribute_enum = CUfunction_attribute_enum( + 5, + ); +} +impl CUfunction_attribute_enum { + /** The binary architecture version for which the function was compiled. + This value is the major binary version * 10 + the minor binary version, + so a binary version 1.3 function would return the value 13. Note that + this will return a value of 10 for legacy cubins that do not have a + properly-encoded binary architecture version.*/ + pub const CU_FUNC_ATTRIBUTE_BINARY_VERSION: CUfunction_attribute_enum = CUfunction_attribute_enum( + 6, + ); +} +impl CUfunction_attribute_enum { + /** The attribute to indicate whether the function has been compiled with + user specified option "-Xptxas --dlcm=ca" set .*/ + pub const CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: CUfunction_attribute_enum = CUfunction_attribute_enum( + 7, + ); +} +impl CUfunction_attribute_enum { + /** The maximum size in bytes of dynamically-allocated shared memory that can be used by + this function. If the user-specified dynamic shared memory size is larger than this + value, the launch will fail. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: CUfunction_attribute_enum = CUfunction_attribute_enum( + 8, + ); +} +impl CUfunction_attribute_enum { + /** On devices where the L1 cache and shared memory use the same hardware resources, + this sets the shared memory carveout preference, in percent of the total shared memory. + Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR. + This is only a hint, and the driver can choose a different ratio if required to execute the function. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: CUfunction_attribute_enum = CUfunction_attribute_enum( + 9, + ); +} +impl CUfunction_attribute_enum { + /** If this attribute is set, the kernel must launch with a valid cluster + size specified. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: CUfunction_attribute_enum = CUfunction_attribute_enum( + 10, + ); +} +impl CUfunction_attribute_enum { + /** The required cluster width in blocks. The values must either all be 0 or + all be positive. 
The validity of the cluster dimensions is otherwise + checked at launch time. + + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: CUfunction_attribute_enum = CUfunction_attribute_enum( + 11, + ); +} +impl CUfunction_attribute_enum { + /** The required cluster height in blocks. The values must either all be 0 or + all be positive. The validity of the cluster dimensions is otherwise + checked at launch time. + + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: CUfunction_attribute_enum = CUfunction_attribute_enum( + 12, + ); +} +impl CUfunction_attribute_enum { + /** The required cluster depth in blocks. The values must either all be 0 or + all be positive. The validity of the cluster dimensions is otherwise + checked at launch time. + + If the value is set during compile time, it cannot be set at runtime. + Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: CUfunction_attribute_enum = CUfunction_attribute_enum( + 13, + ); +} +impl CUfunction_attribute_enum { + /** Whether the function can be launched with non-portable cluster size. 1 is + allowed, 0 is disallowed. A non-portable cluster size may only function + on the specific SKUs the program is tested on. The launch might fail if + the program is run on a different hardware platform. + + CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking + whether the desired size can be launched on the current device. + + Portable Cluster Size + + A portable cluster size is guaranteed to be functional on all compute + capabilities higher than the target compute capability. The portable + cluster size for sm_90 is 8 blocks per cluster. This value may increase + for future compute capabilities. + + The specific hardware unit may support higher cluster sizes that’s not + guaranteed to be portable. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: CUfunction_attribute_enum = CUfunction_attribute_enum( + 14, + ); +} +impl CUfunction_attribute_enum { + /** The block scheduling policy of a function. The value type is + CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. + See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: CUfunction_attribute_enum = CUfunction_attribute_enum( + 15, + ); +} +impl CUfunction_attribute_enum { + /** The block scheduling policy of a function. The value type is + CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. 
+ See ::cuFuncSetAttribute, ::cuKernelSetAttribute*/ + pub const CU_FUNC_ATTRIBUTE_MAX: CUfunction_attribute_enum = CUfunction_attribute_enum( + 16, + ); +} +#[repr(transparent)] +/// Function properties +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUfunction_attribute_enum(pub ::core::ffi::c_uint); +/// Function properties +pub use self::CUfunction_attribute_enum as CUfunction_attribute; +impl CUfunc_cache_enum { + ///< no preference for shared memory or L1 (default) + pub const CU_FUNC_CACHE_PREFER_NONE: CUfunc_cache_enum = CUfunc_cache_enum(0); +} +impl CUfunc_cache_enum { + ///< prefer larger shared memory and smaller L1 cache + pub const CU_FUNC_CACHE_PREFER_SHARED: CUfunc_cache_enum = CUfunc_cache_enum(1); +} +impl CUfunc_cache_enum { + ///< prefer larger L1 cache and smaller shared memory + pub const CU_FUNC_CACHE_PREFER_L1: CUfunc_cache_enum = CUfunc_cache_enum(2); +} +impl CUfunc_cache_enum { + ///< prefer equal sized L1 cache and shared memory + pub const CU_FUNC_CACHE_PREFER_EQUAL: CUfunc_cache_enum = CUfunc_cache_enum(3); +} +#[repr(transparent)] +/// Function cache configurations +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUfunc_cache_enum(pub ::core::ffi::c_uint); +/// Function cache configurations +pub use self::CUfunc_cache_enum as CUfunc_cache; +impl CUsharedconfig_enum { + ///< set default shared memory bank size + pub const CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum( + 0, + ); +} +impl CUsharedconfig_enum { + ///< set shared memory bank width to four bytes + pub const CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum( + 1, + ); +} +impl CUsharedconfig_enum { + ///< set shared memory bank width to eight bytes + pub const CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum( + 2, + ); +} +#[repr(transparent)] +/** \deprecated + + Shared memory configurations*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUsharedconfig_enum(pub ::core::ffi::c_uint); +/** \deprecated + + Shared memory configurations*/ +pub use self::CUsharedconfig_enum as CUsharedconfig; +impl CUshared_carveout_enum { + ///< No preference for shared memory or L1 (default) + pub const CU_SHAREDMEM_CARVEOUT_DEFAULT: CUshared_carveout_enum = CUshared_carveout_enum( + -1, + ); +} +impl CUshared_carveout_enum { + ///< Prefer maximum available shared memory, minimum L1 cache + pub const CU_SHAREDMEM_CARVEOUT_MAX_SHARED: CUshared_carveout_enum = CUshared_carveout_enum( + 100, + ); +} +impl CUshared_carveout_enum { + ///< Prefer maximum available L1 cache, minimum shared memory + pub const CU_SHAREDMEM_CARVEOUT_MAX_L1: CUshared_carveout_enum = CUshared_carveout_enum( + 0, + ); +} +#[repr(transparent)] +/// Shared memory carveout configurations. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUshared_carveout_enum(pub ::core::ffi::c_int); +/// Shared memory carveout configurations. 
These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute +pub use self::CUshared_carveout_enum as CUshared_carveout; +impl CUmemorytype_enum { + ///< Host memory + pub const CU_MEMORYTYPE_HOST: CUmemorytype_enum = CUmemorytype_enum(1); +} +impl CUmemorytype_enum { + ///< Device memory + pub const CU_MEMORYTYPE_DEVICE: CUmemorytype_enum = CUmemorytype_enum(2); +} +impl CUmemorytype_enum { + ///< Array memory + pub const CU_MEMORYTYPE_ARRAY: CUmemorytype_enum = CUmemorytype_enum(3); +} +impl CUmemorytype_enum { + ///< Unified device or host memory + pub const CU_MEMORYTYPE_UNIFIED: CUmemorytype_enum = CUmemorytype_enum(4); +} +#[repr(transparent)] +/// Memory types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemorytype_enum(pub ::core::ffi::c_uint); +/// Memory types +pub use self::CUmemorytype_enum as CUmemorytype; +impl CUcomputemode_enum { + ///< Default compute mode (Multiple contexts allowed per device) + pub const CU_COMPUTEMODE_DEFAULT: CUcomputemode_enum = CUcomputemode_enum(0); +} +impl CUcomputemode_enum { + ///< Compute-prohibited mode (No contexts can be created on this device at this time) + pub const CU_COMPUTEMODE_PROHIBITED: CUcomputemode_enum = CUcomputemode_enum(2); +} +impl CUcomputemode_enum { + ///< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) + pub const CU_COMPUTEMODE_EXCLUSIVE_PROCESS: CUcomputemode_enum = CUcomputemode_enum( + 3, + ); +} +#[repr(transparent)] +/// Compute Modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUcomputemode_enum(pub ::core::ffi::c_uint); +/// Compute Modes +pub use self::CUcomputemode_enum as CUcomputemode; +impl CUmem_advise_enum { + ///< Data will mostly be read and only occasionally be written to + pub const CU_MEM_ADVISE_SET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(1); +} +impl CUmem_advise_enum { + ///< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY + pub const CU_MEM_ADVISE_UNSET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(2); +} +impl CUmem_advise_enum { + ///< Set the preferred location for the data as the specified device + pub const CU_MEM_ADVISE_SET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum( + 3, + ); +} +impl CUmem_advise_enum { + ///< Clear the preferred location for the data + pub const CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum( + 4, + ); +} +impl CUmem_advise_enum { + ///< Data will be accessed by the specified device, so prevent page faults as much as possible + pub const CU_MEM_ADVISE_SET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(5); +} +impl CUmem_advise_enum { + ///< Let the Unified Memory subsystem decide on the page faulting policy for the specified device + pub const CU_MEM_ADVISE_UNSET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(6); +} +#[repr(transparent)] +/// Memory advise values +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmem_advise_enum(pub ::core::ffi::c_uint); +/// Memory advise values +pub use self::CUmem_advise_enum as CUmem_advise; +impl CUmem_range_attribute_enum { + ///< Whether the range will mostly be read and only occasionally be written to + pub const CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 1, + ); +} +impl CUmem_range_attribute_enum { + ///< The preferred location of the range + pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 2, + ); +} +impl 
CUmem_range_attribute_enum { + ///< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device + pub const CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 3, + ); +} +impl CUmem_range_attribute_enum { + ///< The last location to which the range was prefetched + pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 4, + ); +} +impl CUmem_range_attribute_enum { + ///< The preferred location type of the range + pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 5, + ); +} +impl CUmem_range_attribute_enum { + ///< The preferred location id of the range + pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 6, + ); +} +impl CUmem_range_attribute_enum { + ///< The last location type to which the range was prefetched + pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 7, + ); +} +impl CUmem_range_attribute_enum { + ///< The last location id to which the range was prefetched + pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID: CUmem_range_attribute_enum = CUmem_range_attribute_enum( + 8, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmem_range_attribute_enum(pub ::core::ffi::c_uint); +pub use self::CUmem_range_attribute_enum as CUmem_range_attribute; +impl CUjit_option_enum { + /** Max number of registers that a thread may use.\n + Option type: unsigned int\n + Applies to: compiler only*/ + pub const CU_JIT_MAX_REGISTERS: CUjit_option_enum = CUjit_option_enum(0); +} +impl CUjit_option_enum { + /** IN: Specifies minimum number of threads per block to target compilation + for\n + OUT: Returns the number of threads the compiler actually targeted. + This restricts the resource utilization of the compiler (e.g. max + registers) such that a block with the given number of threads should be + able to launch based on register limitations. Note, this option does not + currently take into account any other resource limitations, such as + shared memory utilization.\n + Cannot be combined with ::CU_JIT_TARGET.\n + Option type: unsigned int\n + Applies to: compiler only*/ + pub const CU_JIT_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(1); +} +impl CUjit_option_enum { + /** Overwrites the option value with the total wall clock time, in + milliseconds, spent in the compiler and linker\n + Option type: float\n + Applies to: compiler and linker*/ + pub const CU_JIT_WALL_TIME: CUjit_option_enum = CUjit_option_enum(2); +} +impl CUjit_option_enum { + /** Pointer to a buffer in which to print any log messages + that are informational in nature (the buffer size is specified via + option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n + Option type: char *\n + Applies to: compiler and linker*/ + pub const CU_JIT_INFO_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(3); +} +impl CUjit_option_enum { + /** IN: Log buffer size in bytes. 
Log messages will be capped at this size + (including null terminator)\n + OUT: Amount of log buffer filled with messages\n + Option type: unsigned int\n + Applies to: compiler and linker*/ + pub const CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum( + 4, + ); +} +impl CUjit_option_enum { + /** Pointer to a buffer in which to print any log messages that + reflect errors (the buffer size is specified via option + ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n + Option type: char *\n + Applies to: compiler and linker*/ + pub const CU_JIT_ERROR_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(5); +} +impl CUjit_option_enum { + /** IN: Log buffer size in bytes. Log messages will be capped at this size + (including null terminator)\n + OUT: Amount of log buffer filled with messages\n + Option type: unsigned int\n + Applies to: compiler and linker*/ + pub const CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum( + 6, + ); +} +impl CUjit_option_enum { + /** Level of optimizations to apply to generated code (0 - 4), with 4 + being the default and highest level of optimizations.\n + Option type: unsigned int\n + Applies to: compiler only*/ + pub const CU_JIT_OPTIMIZATION_LEVEL: CUjit_option_enum = CUjit_option_enum(7); +} +impl CUjit_option_enum { + /** No option value required. Determines the target based on the current + attached context (default)\n + Option type: No option value needed\n + Applies to: compiler and linker*/ + pub const CU_JIT_TARGET_FROM_CUCONTEXT: CUjit_option_enum = CUjit_option_enum(8); +} +impl CUjit_option_enum { + /** Target is chosen based on supplied ::CUjit_target. Cannot be + combined with ::CU_JIT_THREADS_PER_BLOCK.\n + Option type: unsigned int for enumerated type ::CUjit_target\n + Applies to: compiler and linker*/ + pub const CU_JIT_TARGET: CUjit_option_enum = CUjit_option_enum(9); +} +impl CUjit_option_enum { + /** Specifies choice of fallback strategy if matching cubin is not found. + Choice is based on supplied ::CUjit_fallback. 
This option cannot be + used with cuLink* APIs as the linker requires exact matches.\n + Option type: unsigned int for enumerated type ::CUjit_fallback\n + Applies to: compiler only*/ + pub const CU_JIT_FALLBACK_STRATEGY: CUjit_option_enum = CUjit_option_enum(10); +} +impl CUjit_option_enum { + /** Specifies whether to create debug information in output (-g) + (0: false, default)\n + Option type: int\n + Applies to: compiler and linker*/ + pub const CU_JIT_GENERATE_DEBUG_INFO: CUjit_option_enum = CUjit_option_enum(11); +} +impl CUjit_option_enum { + /** Generate verbose log messages (0: false, default)\n + Option type: int\n + Applies to: compiler and linker*/ + pub const CU_JIT_LOG_VERBOSE: CUjit_option_enum = CUjit_option_enum(12); +} +impl CUjit_option_enum { + /** Generate line number information (-lineinfo) (0: false, default)\n + Option type: int\n + Applies to: compiler only*/ + pub const CU_JIT_GENERATE_LINE_INFO: CUjit_option_enum = CUjit_option_enum(13); +} +impl CUjit_option_enum { + /** Specifies whether to enable caching explicitly (-dlcm) \n + Choice is based on supplied ::CUjit_cacheMode_enum.\n + Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n + Applies to: compiler only*/ + pub const CU_JIT_CACHE_MODE: CUjit_option_enum = CUjit_option_enum(14); +} +impl CUjit_option_enum { + /** \deprecated + This jit option is deprecated and should not be used.*/ + pub const CU_JIT_NEW_SM3X_OPT: CUjit_option_enum = CUjit_option_enum(15); +} +impl CUjit_option_enum { + /// This jit option is used for internal purpose only. + pub const CU_JIT_FAST_COMPILE: CUjit_option_enum = CUjit_option_enum(16); +} +impl CUjit_option_enum { + /** Array of device symbol names that will be relocated to the corresponding + host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n + Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n + When loading a device module, driver will relocate all encountered + unresolved symbols to the host addresses.\n + It is only allowed to register symbols that correspond to unresolved + global variables.\n + It is illegal to register the same device symbol at multiple addresses.\n + Option type: const char **\n + Applies to: dynamic linker only*/ + pub const CU_JIT_GLOBAL_SYMBOL_NAMES: CUjit_option_enum = CUjit_option_enum(17); +} +impl CUjit_option_enum { + /** Array of host addresses that will be used to relocate corresponding + device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n + Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n + Option type: void **\n + Applies to: dynamic linker only*/ + pub const CU_JIT_GLOBAL_SYMBOL_ADDRESSES: CUjit_option_enum = CUjit_option_enum(18); +} +impl CUjit_option_enum { + /** Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and + ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n + Option type: unsigned int\n + Applies to: dynamic linker only*/ + pub const CU_JIT_GLOBAL_SYMBOL_COUNT: CUjit_option_enum = CUjit_option_enum(19); +} +impl CUjit_option_enum { + /** \deprecated + Enable link-time optimization (-dlto) for device code (Disabled by default).\n + This option is not supported on 32-bit platforms.\n + Option type: int\n + Applies to: compiler and linker + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_LTO: CUjit_option_enum = CUjit_option_enum(20); +} +impl CUjit_option_enum { + /** \deprecated + Control single-precision denormals (-ftz) support (0: false, default). 
+ 1 : flushes denormal values to zero + 0 : preserves denormal values + Option type: int\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_FTZ: CUjit_option_enum = CUjit_option_enum(21); +} +impl CUjit_option_enum { + /** \deprecated + Control single-precision floating-point division and reciprocals + (-prec-div) support (1: true, default). + 1 : Enables the IEEE round-to-nearest mode + 0 : Enables the fast approximation mode + Option type: int\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_PREC_DIV: CUjit_option_enum = CUjit_option_enum(22); +} +impl CUjit_option_enum { + /** \deprecated + Control single-precision floating-point square root + (-prec-sqrt) support (1: true, default). + 1 : Enables the IEEE round-to-nearest mode + 0 : Enables the fast approximation mode + Option type: int\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_PREC_SQRT: CUjit_option_enum = CUjit_option_enum(23); +} +impl CUjit_option_enum { + /** \deprecated + Enable/Disable the contraction of floating-point multiplies + and adds/subtracts into floating-point multiply-add (-fma) + operations (1: Enable, default; 0: Disable). + Option type: int\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_FMA: CUjit_option_enum = CUjit_option_enum(24); +} +impl CUjit_option_enum { + /** \deprecated + Array of kernel names that should be preserved at link time while others + can be removed.\n + Must contain ::CU_JIT_REFERENCED_KERNEL_COUNT entries.\n + Note that kernel names can be mangled by the compiler in which case the + mangled name needs to be specified.\n + Wildcard "*" can be used to represent zero or more characters instead of + specifying the full or mangled name.\n + It is important to note that the wildcard "*" is also added implicitly. + For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and + thus preserve all kernels with those names. This can be avoided by providing + a more specific name like "barfoobaz".\n + Option type: const char **\n + Applies to: dynamic linker only + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_REFERENCED_KERNEL_NAMES: CUjit_option_enum = CUjit_option_enum(25); +} +impl CUjit_option_enum { + /** \deprecated + Number of entries in ::CU_JIT_REFERENCED_KERNEL_NAMES array.\n + Option type: unsigned int\n + Applies to: dynamic linker only + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_REFERENCED_KERNEL_COUNT: CUjit_option_enum = CUjit_option_enum(26); +} +impl CUjit_option_enum { + /** \deprecated + Array of variable names (__device__ and/or __constant__) that should be + preserved at link time while others can be removed.\n + Must contain ::CU_JIT_REFERENCED_VARIABLE_COUNT entries.\n + Note that variable names can be mangled by the compiler in which case the + mangled name needs to be specified.\n + Wildcard "*" can be used to represent zero or more characters instead of + specifying the full or mangled name.\n + It is important to note that the wildcard "*" is also added implicitly. 
+ For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and + thus preserve all variables with those names. This can be avoided by providing + a more specific name like "barfoobaz".\n + Option type: const char **\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_REFERENCED_VARIABLE_NAMES: CUjit_option_enum = CUjit_option_enum( + 27, + ); +} +impl CUjit_option_enum { + /** \deprecated + Number of entries in ::CU_JIT_REFERENCED_VARIABLE_NAMES array.\n + Option type: unsigned int\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_REFERENCED_VARIABLE_COUNT: CUjit_option_enum = CUjit_option_enum( + 28, + ); +} +impl CUjit_option_enum { + /** \deprecated + This option serves as a hint to enable the JIT compiler/linker + to remove constant (__constant__) and device (__device__) variables + unreferenced in device code (Disabled by default).\n + Note that host references to constant and device variables using APIs like + ::cuModuleGetGlobal() with this option specified may result in undefined behavior unless + the variables are explicitly specified using ::CU_JIT_REFERENCED_VARIABLE_NAMES.\n + Option type: int\n + Applies to: link-time optimization specified with CU_JIT_LTO + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES: CUjit_option_enum = CUjit_option_enum( + 29, + ); +} +impl CUjit_option_enum { + /** Generate position independent code (0: false)\n + Option type: int\n + Applies to: compiler only*/ + pub const CU_JIT_POSITION_INDEPENDENT_CODE: CUjit_option_enum = CUjit_option_enum( + 30, + ); +} +impl CUjit_option_enum { + /** This option hints to the JIT compiler the minimum number of CTAs from the + kernel’s grid to be mapped to a SM. This option is ignored when used together + with ::CU_JIT_MAX_REGISTERS or ::CU_JIT_THREADS_PER_BLOCK. + Optimizations based on this option need ::CU_JIT_MAX_THREADS_PER_BLOCK to + be specified as well. For kernels already using PTX directive .minnctapersm, + this option will be ignored by default. Use ::CU_JIT_OVERRIDE_DIRECTIVE_VALUES + to let this option take precedence over the PTX directive. + Option type: unsigned int\n + Applies to: compiler only*/ + pub const CU_JIT_MIN_CTA_PER_SM: CUjit_option_enum = CUjit_option_enum(31); +} +impl CUjit_option_enum { + /** Maximum number threads in a thread block, computed as the product of + the maximum extent specifed for each dimension of the block. This limit + is guaranteed not to be exeeded in any invocation of the kernel. Exceeding + the the maximum number of threads results in runtime error or kernel launch + failure. For kernels already using PTX directive .maxntid, this option will + be ignored by default. Use ::CU_JIT_OVERRIDE_DIRECTIVE_VALUES to let this + option take precedence over the PTX directive. + Option type: int\n + Applies to: compiler only*/ + pub const CU_JIT_MAX_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(32); +} +impl CUjit_option_enum { + /** This option lets the values specified using ::CU_JIT_MAX_REGISTERS, + ::CU_JIT_THREADS_PER_BLOCK, ::CU_JIT_MAX_THREADS_PER_BLOCK and + ::CU_JIT_MIN_CTA_PER_SM take precedence over any PTX directives. 
+ (0: Disable, default; 1: Enable) + Option type: int\n + Applies to: compiler only*/ + pub const CU_JIT_OVERRIDE_DIRECTIVE_VALUES: CUjit_option_enum = CUjit_option_enum( + 33, + ); +} +impl CUjit_option_enum { + /** This option lets the values specified using ::CU_JIT_MAX_REGISTERS, + ::CU_JIT_THREADS_PER_BLOCK, ::CU_JIT_MAX_THREADS_PER_BLOCK and + ::CU_JIT_MIN_CTA_PER_SM take precedence over any PTX directives. + (0: Disable, default; 1: Enable) + Option type: int\n + Applies to: compiler only*/ + pub const CU_JIT_NUM_OPTIONS: CUjit_option_enum = CUjit_option_enum(34); +} +#[repr(transparent)] +/// Online compiler and linker options +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUjit_option_enum(pub ::core::ffi::c_uint); +/// Online compiler and linker options +pub use self::CUjit_option_enum as CUjit_option; +impl CUjit_target_enum { + ///< Compute device class 3.0 + pub const CU_TARGET_COMPUTE_30: CUjit_target_enum = CUjit_target_enum(30); +} +impl CUjit_target_enum { + ///< Compute device class 3.2 + pub const CU_TARGET_COMPUTE_32: CUjit_target_enum = CUjit_target_enum(32); +} +impl CUjit_target_enum { + ///< Compute device class 3.5 + pub const CU_TARGET_COMPUTE_35: CUjit_target_enum = CUjit_target_enum(35); +} +impl CUjit_target_enum { + ///< Compute device class 3.7 + pub const CU_TARGET_COMPUTE_37: CUjit_target_enum = CUjit_target_enum(37); +} +impl CUjit_target_enum { + ///< Compute device class 5.0 + pub const CU_TARGET_COMPUTE_50: CUjit_target_enum = CUjit_target_enum(50); +} +impl CUjit_target_enum { + ///< Compute device class 5.2 + pub const CU_TARGET_COMPUTE_52: CUjit_target_enum = CUjit_target_enum(52); +} +impl CUjit_target_enum { + ///< Compute device class 5.3 + pub const CU_TARGET_COMPUTE_53: CUjit_target_enum = CUjit_target_enum(53); +} +impl CUjit_target_enum { + ///< Compute device class 6.0. + pub const CU_TARGET_COMPUTE_60: CUjit_target_enum = CUjit_target_enum(60); +} +impl CUjit_target_enum { + ///< Compute device class 6.1. + pub const CU_TARGET_COMPUTE_61: CUjit_target_enum = CUjit_target_enum(61); +} +impl CUjit_target_enum { + ///< Compute device class 6.2. + pub const CU_TARGET_COMPUTE_62: CUjit_target_enum = CUjit_target_enum(62); +} +impl CUjit_target_enum { + ///< Compute device class 7.0. + pub const CU_TARGET_COMPUTE_70: CUjit_target_enum = CUjit_target_enum(70); +} +impl CUjit_target_enum { + ///< Compute device class 7.2. + pub const CU_TARGET_COMPUTE_72: CUjit_target_enum = CUjit_target_enum(72); +} +impl CUjit_target_enum { + ///< Compute device class 7.5. + pub const CU_TARGET_COMPUTE_75: CUjit_target_enum = CUjit_target_enum(75); +} +impl CUjit_target_enum { + ///< Compute device class 8.0. + pub const CU_TARGET_COMPUTE_80: CUjit_target_enum = CUjit_target_enum(80); +} +impl CUjit_target_enum { + ///< Compute device class 8.6. + pub const CU_TARGET_COMPUTE_86: CUjit_target_enum = CUjit_target_enum(86); +} +impl CUjit_target_enum { + ///< Compute device class 8.7. + pub const CU_TARGET_COMPUTE_87: CUjit_target_enum = CUjit_target_enum(87); +} +impl CUjit_target_enum { + ///< Compute device class 8.9. + pub const CU_TARGET_COMPUTE_89: CUjit_target_enum = CUjit_target_enum(89); +} +impl CUjit_target_enum { + ///< Compute device class 9.0. 
+ pub const CU_TARGET_COMPUTE_90: CUjit_target_enum = CUjit_target_enum(90); +} +impl CUjit_target_enum { + pub const CU_TARGET_COMPUTE_90A: CUjit_target_enum = CUjit_target_enum(65626); +} +#[repr(transparent)] +/// Online compilation targets +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUjit_target_enum(pub ::core::ffi::c_uint); +/// Online compilation targets +pub use self::CUjit_target_enum as CUjit_target; +impl CUjit_fallback_enum { + ///< Prefer to compile ptx if exact binary match not found + pub const CU_PREFER_PTX: CUjit_fallback_enum = CUjit_fallback_enum(0); +} +impl CUjit_fallback_enum { + ///< Prefer to fall back to compatible binary code if exact match not found + pub const CU_PREFER_BINARY: CUjit_fallback_enum = CUjit_fallback_enum(1); +} +#[repr(transparent)] +/// Cubin matching fallback strategies +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUjit_fallback_enum(pub ::core::ffi::c_uint); +/// Cubin matching fallback strategies +pub use self::CUjit_fallback_enum as CUjit_fallback; +impl CUjit_cacheMode_enum { + ///< Compile with no -dlcm flag specified + pub const CU_JIT_CACHE_OPTION_NONE: CUjit_cacheMode_enum = CUjit_cacheMode_enum(0); +} +impl CUjit_cacheMode_enum { + ///< Compile with L1 cache disabled + pub const CU_JIT_CACHE_OPTION_CG: CUjit_cacheMode_enum = CUjit_cacheMode_enum(1); +} +impl CUjit_cacheMode_enum { + ///< Compile with L1 cache enabled + pub const CU_JIT_CACHE_OPTION_CA: CUjit_cacheMode_enum = CUjit_cacheMode_enum(2); +} +#[repr(transparent)] +/// Caching modes for dlcm +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUjit_cacheMode_enum(pub ::core::ffi::c_uint); +/// Caching modes for dlcm +pub use self::CUjit_cacheMode_enum as CUjit_cacheMode; +impl CUjitInputType_enum { + /** Compiled device-class-specific device code\n + Applicable options: none*/ + pub const CU_JIT_INPUT_CUBIN: CUjitInputType_enum = CUjitInputType_enum(0); +} +impl CUjitInputType_enum { + /** PTX source code\n + Applicable options: PTX compiler options*/ + pub const CU_JIT_INPUT_PTX: CUjitInputType_enum = CUjitInputType_enum(1); +} +impl CUjitInputType_enum { + /** Bundle of multiple cubins and/or PTX of some device code\n + Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/ + pub const CU_JIT_INPUT_FATBINARY: CUjitInputType_enum = CUjitInputType_enum(2); +} +impl CUjitInputType_enum { + /** Host object with embedded device code\n + Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/ + pub const CU_JIT_INPUT_OBJECT: CUjitInputType_enum = CUjitInputType_enum(3); +} +impl CUjitInputType_enum { + /** Archive of host objects with embedded device code\n + Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY*/ + pub const CU_JIT_INPUT_LIBRARY: CUjitInputType_enum = CUjitInputType_enum(4); +} +impl CUjitInputType_enum { + /** \deprecated + High-level intermediate code for link-time optimization\n + Applicable options: NVVM compiler options, PTX compiler options + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_INPUT_NVVM: CUjitInputType_enum = CUjitInputType_enum(5); +} +impl CUjitInputType_enum { + /** \deprecated + High-level intermediate code for link-time optimization\n + Applicable options: NVVM compiler options, PTX compiler options + + Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0*/ + pub const CU_JIT_NUM_INPUT_TYPES: CUjitInputType_enum = CUjitInputType_enum(6); +} +#[repr(transparent)] +/// Device code formats 
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUjitInputType_enum(pub ::core::ffi::c_uint); +/// Device code formats +pub use self::CUjitInputType_enum as CUjitInputType; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUlinkState_st { + _unused: [u8; 0], +} +pub type CUlinkState = *mut CUlinkState_st; +impl CUgraphicsRegisterFlags_enum { + pub const CU_GRAPHICS_REGISTER_FLAGS_NONE: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum( + 0, + ); +} +impl CUgraphicsRegisterFlags_enum { + pub const CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum( + 1, + ); +} +impl CUgraphicsRegisterFlags_enum { + pub const CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum( + 2, + ); +} +impl CUgraphicsRegisterFlags_enum { + pub const CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum( + 4, + ); +} +impl CUgraphicsRegisterFlags_enum { + pub const CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER: CUgraphicsRegisterFlags_enum = CUgraphicsRegisterFlags_enum( + 8, + ); +} +#[repr(transparent)] +/// Flags to register a graphics resource +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphicsRegisterFlags_enum(pub ::core::ffi::c_uint); +/// Flags to register a graphics resource +pub use self::CUgraphicsRegisterFlags_enum as CUgraphicsRegisterFlags; +impl CUgraphicsMapResourceFlags_enum { + pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum( + 0, + ); +} +impl CUgraphicsMapResourceFlags_enum { + pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum( + 1, + ); +} +impl CUgraphicsMapResourceFlags_enum { + pub const CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: CUgraphicsMapResourceFlags_enum = CUgraphicsMapResourceFlags_enum( + 2, + ); +} +#[repr(transparent)] +/// Flags for mapping and unmapping interop resources +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphicsMapResourceFlags_enum(pub ::core::ffi::c_uint); +/// Flags for mapping and unmapping interop resources +pub use self::CUgraphicsMapResourceFlags_enum as CUgraphicsMapResourceFlags; +impl CUarray_cubemap_face_enum { + ///< Positive X face of cubemap + pub const CU_CUBEMAP_FACE_POSITIVE_X: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum( + 0, + ); +} +impl CUarray_cubemap_face_enum { + ///< Negative X face of cubemap + pub const CU_CUBEMAP_FACE_NEGATIVE_X: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum( + 1, + ); +} +impl CUarray_cubemap_face_enum { + ///< Positive Y face of cubemap + pub const CU_CUBEMAP_FACE_POSITIVE_Y: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum( + 2, + ); +} +impl CUarray_cubemap_face_enum { + ///< Negative Y face of cubemap + pub const CU_CUBEMAP_FACE_NEGATIVE_Y: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum( + 3, + ); +} +impl CUarray_cubemap_face_enum { + ///< Positive Z face of cubemap + pub const CU_CUBEMAP_FACE_POSITIVE_Z: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum( + 4, + ); +} +impl CUarray_cubemap_face_enum { + ///< Negative Z face of cubemap + pub const CU_CUBEMAP_FACE_NEGATIVE_Z: CUarray_cubemap_face_enum = CUarray_cubemap_face_enum( + 5, + ); +} +#[repr(transparent)] +/// Array indices for cube faces +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUarray_cubemap_face_enum(pub ::core::ffi::c_uint); +/// Array indices for cube faces 
+pub use self::CUarray_cubemap_face_enum as CUarray_cubemap_face; +impl CUlimit_enum { + ///< GPU thread stack size + pub const CU_LIMIT_STACK_SIZE: CUlimit_enum = CUlimit_enum(0); +} +impl CUlimit_enum { + ///< GPU printf FIFO size + pub const CU_LIMIT_PRINTF_FIFO_SIZE: CUlimit_enum = CUlimit_enum(1); +} +impl CUlimit_enum { + ///< GPU malloc heap size + pub const CU_LIMIT_MALLOC_HEAP_SIZE: CUlimit_enum = CUlimit_enum(2); +} +impl CUlimit_enum { + ///< GPU device runtime launch synchronize depth + pub const CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: CUlimit_enum = CUlimit_enum(3); +} +impl CUlimit_enum { + ///< GPU device runtime pending launch count + pub const CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: CUlimit_enum = CUlimit_enum(4); +} +impl CUlimit_enum { + ///< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint + pub const CU_LIMIT_MAX_L2_FETCH_GRANULARITY: CUlimit_enum = CUlimit_enum(5); +} +impl CUlimit_enum { + ///< A size in bytes for L2 persisting lines cache size + pub const CU_LIMIT_PERSISTING_L2_CACHE_SIZE: CUlimit_enum = CUlimit_enum(6); +} +impl CUlimit_enum { + pub const CU_LIMIT_MAX: CUlimit_enum = CUlimit_enum(7); +} +#[repr(transparent)] +/// Limits +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlimit_enum(pub ::core::ffi::c_uint); +/// Limits +pub use self::CUlimit_enum as CUlimit; +impl CUresourcetype_enum { + ///< Array resource + pub const CU_RESOURCE_TYPE_ARRAY: CUresourcetype_enum = CUresourcetype_enum(0); +} +impl CUresourcetype_enum { + ///< Mipmapped array resource + pub const CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: CUresourcetype_enum = CUresourcetype_enum( + 1, + ); +} +impl CUresourcetype_enum { + ///< Linear resource + pub const CU_RESOURCE_TYPE_LINEAR: CUresourcetype_enum = CUresourcetype_enum(2); +} +impl CUresourcetype_enum { + ///< Pitch 2D resource + pub const CU_RESOURCE_TYPE_PITCH2D: CUresourcetype_enum = CUresourcetype_enum(3); +} +#[repr(transparent)] +/// Resource types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUresourcetype_enum(pub ::core::ffi::c_uint); +/// Resource types +pub use self::CUresourcetype_enum as CUresourcetype; +/** CUDA host function + \param userData Argument value passed to the function*/ +pub type CUhostFn = ::core::option::Option< + unsafe extern "system" fn(userData: *mut ::core::ffi::c_void), +>; +impl CUaccessProperty_enum { + ///< Normal cache persistence. + pub const CU_ACCESS_PROPERTY_NORMAL: CUaccessProperty_enum = CUaccessProperty_enum( + 0, + ); +} +impl CUaccessProperty_enum { + ///< Streaming access is less likely to persit from cache. + pub const CU_ACCESS_PROPERTY_STREAMING: CUaccessProperty_enum = CUaccessProperty_enum( + 1, + ); +} +impl CUaccessProperty_enum { + ///< Persisting access is more likely to persist in cache. + pub const CU_ACCESS_PROPERTY_PERSISTING: CUaccessProperty_enum = CUaccessProperty_enum( + 2, + ); +} +#[repr(transparent)] +/// Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUaccessProperty_enum(pub ::core::ffi::c_uint); +/// Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members. +pub use self::CUaccessProperty_enum as CUaccessProperty; +/** Specifies an access policy for a window, a contiguous extent of memory + beginning at base_ptr and ending at base_ptr + num_bytes. + num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. 
+ Partition into many segments and assign segments such that: + sum of "hit segments" / window == approx. ratio. + sum of "miss segments" / window == approx 1-ratio. + Segments and ratio specifications are fitted to the capabilities of + the architecture. + Accesses in a hit segment apply the hitProp access policy. + Accesses in a miss segment apply the missProp access policy.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct CUaccessPolicyWindow_st { + ///< Starting address of the access policy window. CUDA driver may align it. + pub base_ptr: *mut ::core::ffi::c_void, + ///< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment. + pub num_bytes: usize, + ///< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp. + pub hitRatio: f32, + ///< ::CUaccessProperty set for hit. + pub hitProp: CUaccessProperty, + ///< ::CUaccessProperty set for miss. Must be either NORMAL or STREAMING + pub missProp: CUaccessProperty, +} +/** Specifies an access policy for a window, a contiguous extent of memory + beginning at base_ptr and ending at base_ptr + num_bytes. + num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. + Partition into many segments and assign segments such that: + sum of "hit segments" / window == approx. ratio. + sum of "miss segments" / window == approx 1-ratio. + Segments and ratio specifications are fitted to the capabilities of + the architecture. + Accesses in a hit segment apply the hitProp access policy. + Accesses in a miss segment apply the missProp access policy.*/ +pub type CUaccessPolicyWindow_v1 = CUaccessPolicyWindow_st; +/// Access policy window +pub type CUaccessPolicyWindow = CUaccessPolicyWindow_v1; +/// GPU kernel node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_KERNEL_NODE_PARAMS_st { + ///< Kernel to launch + pub func: CUfunction, + ///< Width of grid in blocks + pub gridDimX: ::core::ffi::c_uint, + ///< Height of grid in blocks + pub gridDimY: ::core::ffi::c_uint, + ///< Depth of grid in blocks + pub gridDimZ: ::core::ffi::c_uint, + ///< X dimension of each thread block + pub blockDimX: ::core::ffi::c_uint, + ///< Y dimension of each thread block + pub blockDimY: ::core::ffi::c_uint, + ///< Z dimension of each thread block + pub blockDimZ: ::core::ffi::c_uint, + ///< Dynamic shared-memory size per thread block in bytes + pub sharedMemBytes: ::core::ffi::c_uint, + ///< Array of pointers to kernel parameters + pub kernelParams: *mut *mut ::core::ffi::c_void, + ///< Extra options + pub extra: *mut *mut ::core::ffi::c_void, +} +/// GPU kernel node parameters +pub type CUDA_KERNEL_NODE_PARAMS_v1 = CUDA_KERNEL_NODE_PARAMS_st; +/// GPU kernel node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_KERNEL_NODE_PARAMS_v2_st { + ///< Kernel to launch + pub func: CUfunction, + ///< Width of grid in blocks + pub gridDimX: ::core::ffi::c_uint, + ///< Height of grid in blocks + pub gridDimY: ::core::ffi::c_uint, + ///< Depth of grid in blocks + pub gridDimZ: ::core::ffi::c_uint, + ///< X dimension of each thread block + pub blockDimX: ::core::ffi::c_uint, + ///< Y dimension of each thread block + pub blockDimY: ::core::ffi::c_uint, + ///< Z dimension of each thread block + pub blockDimZ: ::core::ffi::c_uint, + ///< Dynamic shared-memory size per thread block in bytes + pub sharedMemBytes: ::core::ffi::c_uint, + ///< Array of pointers to kernel parameters + pub kernelParams: *mut *mut 
::core::ffi::c_void, + ///< Extra options + pub extra: *mut *mut ::core::ffi::c_void, + ///< Kernel to launch, will only be referenced if func is NULL + pub kern: CUkernel, + ///< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set. + pub ctx: CUcontext, +} +/// GPU kernel node parameters +pub type CUDA_KERNEL_NODE_PARAMS_v2 = CUDA_KERNEL_NODE_PARAMS_v2_st; +/// GPU kernel node parameters +pub type CUDA_KERNEL_NODE_PARAMS = CUDA_KERNEL_NODE_PARAMS_v2; +/// GPU kernel node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_KERNEL_NODE_PARAMS_v3_st { + ///< Kernel to launch + pub func: CUfunction, + ///< Width of grid in blocks + pub gridDimX: ::core::ffi::c_uint, + ///< Height of grid in blocks + pub gridDimY: ::core::ffi::c_uint, + ///< Depth of grid in blocks + pub gridDimZ: ::core::ffi::c_uint, + ///< X dimension of each thread block + pub blockDimX: ::core::ffi::c_uint, + ///< Y dimension of each thread block + pub blockDimY: ::core::ffi::c_uint, + ///< Z dimension of each thread block + pub blockDimZ: ::core::ffi::c_uint, + ///< Dynamic shared-memory size per thread block in bytes + pub sharedMemBytes: ::core::ffi::c_uint, + ///< Array of pointers to kernel parameters + pub kernelParams: *mut *mut ::core::ffi::c_void, + ///< Extra options + pub extra: *mut *mut ::core::ffi::c_void, + ///< Kernel to launch, will only be referenced if func is NULL + pub kern: CUkernel, + ///< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set. + pub ctx: CUcontext, +} +/// GPU kernel node parameters +pub type CUDA_KERNEL_NODE_PARAMS_v3 = CUDA_KERNEL_NODE_PARAMS_v3_st; +/// Memset node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMSET_NODE_PARAMS_st { + ///< Destination device pointer + pub dst: CUdeviceptr, + ///< Pitch of destination device pointer. Unused if height is 1 + pub pitch: usize, + ///< Value to be set + pub value: ::core::ffi::c_uint, + ///< Size of each element in bytes. Must be 1, 2, or 4. + pub elementSize: ::core::ffi::c_uint, + ///< Width of the row in elements + pub width: usize, + ///< Number of rows + pub height: usize, +} +/// Memset node parameters +pub type CUDA_MEMSET_NODE_PARAMS_v1 = CUDA_MEMSET_NODE_PARAMS_st; +/// Memset node parameters +pub type CUDA_MEMSET_NODE_PARAMS = CUDA_MEMSET_NODE_PARAMS_v1; +/// Memset node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMSET_NODE_PARAMS_v2_st { + ///< Destination device pointer + pub dst: CUdeviceptr, + ///< Pitch of destination device pointer. Unused if height is 1 + pub pitch: usize, + ///< Value to be set + pub value: ::core::ffi::c_uint, + ///< Size of each element in bytes. Must be 1, 2, or 4. 
+ pub elementSize: ::core::ffi::c_uint, + ///< Width of the row in elements + pub width: usize, + ///< Number of rows + pub height: usize, + ///< Context on which to run the node + pub ctx: CUcontext, +} +/// Memset node parameters +pub type CUDA_MEMSET_NODE_PARAMS_v2 = CUDA_MEMSET_NODE_PARAMS_v2_st; +/// Host node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash)] +pub struct CUDA_HOST_NODE_PARAMS_st { + ///< The function to call when the node executes + pub fn_: CUhostFn, + ///< Argument to pass to the function + pub userData: *mut ::core::ffi::c_void, +} +/// Host node parameters +pub type CUDA_HOST_NODE_PARAMS_v1 = CUDA_HOST_NODE_PARAMS_st; +/// Host node parameters +pub type CUDA_HOST_NODE_PARAMS = CUDA_HOST_NODE_PARAMS_v1; +/// Host node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_HOST_NODE_PARAMS_v2_st { + ///< The function to call when the node executes + pub fn_: CUhostFn, + ///< Argument to pass to the function + pub userData: *mut ::core::ffi::c_void, +} +/// Host node parameters +pub type CUDA_HOST_NODE_PARAMS_v2 = CUDA_HOST_NODE_PARAMS_v2_st; +impl CUgraphConditionalNodeType_enum { + ///< Conditional 'if' Node. Body executed once if condition value is non-zero. + pub const CU_GRAPH_COND_TYPE_IF: CUgraphConditionalNodeType_enum = CUgraphConditionalNodeType_enum( + 0, + ); +} +impl CUgraphConditionalNodeType_enum { + ///< Conditional 'while' Node. Body executed repeatedly while condition value is non-zero. + pub const CU_GRAPH_COND_TYPE_WHILE: CUgraphConditionalNodeType_enum = CUgraphConditionalNodeType_enum( + 1, + ); +} +#[repr(transparent)] +/// Conditional node types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphConditionalNodeType_enum(pub ::core::ffi::c_uint); +/// Conditional node types +pub use self::CUgraphConditionalNodeType_enum as CUgraphConditionalNodeType; +/// Conditional node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_CONDITIONAL_NODE_PARAMS { + /**< Conditional node handle. +Handles must be created in advance of creating the node +using ::cuGraphConditionalHandleCreate.*/ + pub handle: CUgraphConditionalHandle, + ///< Type of conditional node. + pub type_: CUgraphConditionalNodeType, + ///< Size of graph output array. Must be 1. + pub size: ::core::ffi::c_uint, + /**< CUDA-owned array populated with conditional node child graphs during creation of the node. +Valid for the lifetime of the conditional node. +The contents of the graph(s) are subject to the following constraints: + +- Allowed node types are kernel nodes, empty nodes, child graphs, memsets, +memcopies, and conditionals. This applies recursively to child graphs and conditional bodies. +- All kernels, including kernels in nested conditionals or child graphs at any level, +must belong to the same CUDA context. + +These graphs may be populated using graph node creation APIs or ::cuStreamBeginCaptureToGraph.*/ + pub phGraph_out: *mut CUgraph, + ///< Context on which to run the node. Must match context used to create the handle and all body nodes. 
+ pub ctx: CUcontext, +} +impl CUgraphNodeType_enum { + ///< GPU kernel node + pub const CU_GRAPH_NODE_TYPE_KERNEL: CUgraphNodeType_enum = CUgraphNodeType_enum(0); +} +impl CUgraphNodeType_enum { + ///< Memcpy node + pub const CU_GRAPH_NODE_TYPE_MEMCPY: CUgraphNodeType_enum = CUgraphNodeType_enum(1); +} +impl CUgraphNodeType_enum { + ///< Memset node + pub const CU_GRAPH_NODE_TYPE_MEMSET: CUgraphNodeType_enum = CUgraphNodeType_enum(2); +} +impl CUgraphNodeType_enum { + ///< Host (executable) node + pub const CU_GRAPH_NODE_TYPE_HOST: CUgraphNodeType_enum = CUgraphNodeType_enum(3); +} +impl CUgraphNodeType_enum { + ///< Node which executes an embedded graph + pub const CU_GRAPH_NODE_TYPE_GRAPH: CUgraphNodeType_enum = CUgraphNodeType_enum(4); +} +impl CUgraphNodeType_enum { + ///< Empty (no-op) node + pub const CU_GRAPH_NODE_TYPE_EMPTY: CUgraphNodeType_enum = CUgraphNodeType_enum(5); +} +impl CUgraphNodeType_enum { + ///< External event wait node + pub const CU_GRAPH_NODE_TYPE_WAIT_EVENT: CUgraphNodeType_enum = CUgraphNodeType_enum( + 6, + ); +} +impl CUgraphNodeType_enum { + ///< External event record node + pub const CU_GRAPH_NODE_TYPE_EVENT_RECORD: CUgraphNodeType_enum = CUgraphNodeType_enum( + 7, + ); +} +impl CUgraphNodeType_enum { + ///< External semaphore signal node + pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL: CUgraphNodeType_enum = CUgraphNodeType_enum( + 8, + ); +} +impl CUgraphNodeType_enum { + ///< External semaphore wait node + pub const CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT: CUgraphNodeType_enum = CUgraphNodeType_enum( + 9, + ); +} +impl CUgraphNodeType_enum { + ///< Memory Allocation Node + pub const CU_GRAPH_NODE_TYPE_MEM_ALLOC: CUgraphNodeType_enum = CUgraphNodeType_enum( + 10, + ); +} +impl CUgraphNodeType_enum { + ///< Memory Free Node + pub const CU_GRAPH_NODE_TYPE_MEM_FREE: CUgraphNodeType_enum = CUgraphNodeType_enum( + 11, + ); +} +impl CUgraphNodeType_enum { + ///< Batch MemOp Node + pub const CU_GRAPH_NODE_TYPE_BATCH_MEM_OP: CUgraphNodeType_enum = CUgraphNodeType_enum( + 12, + ); +} +impl CUgraphNodeType_enum { + /**< Conditional Node + +May be used to implement a conditional execution path or loop +inside of a graph. The graph(s) contained within the body of the conditional node +can be selectively executed or iterated upon based on the value of a conditional +variable. + +Handles must be created in advance of creating the node +using ::cuGraphConditionalHandleCreate. + +The following restrictions apply to graphs which contain conditional nodes: +The graph cannot be used in a child node. +Only one instantiation of the graph may exist at any point in time. +The graph cannot be cloned. + +To set the control value, supply a default value when creating the handle and/or +call ::cudaGraphSetConditional from device code.*/ + pub const CU_GRAPH_NODE_TYPE_CONDITIONAL: CUgraphNodeType_enum = CUgraphNodeType_enum( + 13, + ); +} +#[repr(transparent)] +/// Graph node types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphNodeType_enum(pub ::core::ffi::c_uint); +/// Graph node types +pub use self::CUgraphNodeType_enum as CUgraphNodeType; +impl CUgraphDependencyType_enum { + ///< This is an ordinary dependency. + pub const CU_GRAPH_DEPENDENCY_TYPE_DEFAULT: CUgraphDependencyType_enum = CUgraphDependencyType_enum( + 0, + ); +} +impl CUgraphDependencyType_enum { + /**< This dependency type allows the downstream node to +use \c cudaGridDependencySynchronize(). 
It may only be used +between kernel nodes, and must be used with either the +::CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC or +::CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER outgoing port.*/ + pub const CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC: CUgraphDependencyType_enum = CUgraphDependencyType_enum( + 1, + ); +} +#[repr(transparent)] +/// Type annotations that can be applied to graph edges as part of ::CUgraphEdgeData. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphDependencyType_enum(pub ::core::ffi::c_uint); +/// Type annotations that can be applied to graph edges as part of ::CUgraphEdgeData. +pub use self::CUgraphDependencyType_enum as CUgraphDependencyType; +/** Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and + default to a zero-initialized value if not specified. A zero-initialized struct indicates a + standard full serialization of two nodes with memory visibility.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphEdgeData_st { + /**< This indicates when the dependency is triggered from the upstream +node on the edge. The meaning is specific to the node type. A value +of 0 in all cases means full completion of the upstream node, with +memory visibility to the downstream node or portion thereof +(indicated by \c to_port). +
+Only kernel nodes define non-zero ports. A kernel node +can use the following output port types: +::CU_GRAPH_KERNEL_NODE_PORT_DEFAULT, ::CU_GRAPH_KERNEL_NODE_PORT_PROGRAMMATIC, +or ::CU_GRAPH_KERNEL_NODE_PORT_LAUNCH_ORDER.*/ + pub from_port: ::core::ffi::c_uchar, + /**< This indicates what portion of the downstream node is dependent on +the upstream node or portion thereof (indicated by \c from_port). The +meaning is specific to the node type. A value of 0 in all cases means +the entirety of the downstream node is dependent on the upstream work. +
+Currently no node types define non-zero ports. Accordingly, this field +must be set to zero.*/ + pub to_port: ::core::ffi::c_uchar, + /**< This should be populated with a value from ::CUgraphDependencyType. (It +is typed as char due to compiler-specific layout of bitfields.) See +::CUgraphDependencyType.*/ + pub type_: ::core::ffi::c_uchar, + /**< These bytes are unused and must be zeroed. This ensures +compatibility if additional fields are added in the future.*/ + pub reserved: [::core::ffi::c_uchar; 5usize], +} +/** Optional annotation for edges in a CUDA graph. Note, all edges implicitly have annotations and + default to a zero-initialized value if not specified. A zero-initialized struct indicates a + standard full serialization of two nodes with memory visibility.*/ +pub type CUgraphEdgeData = CUgraphEdgeData_st; +impl CUgraphInstantiateResult_enum { + ///< Instantiation succeeded + pub const CUDA_GRAPH_INSTANTIATE_SUCCESS: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum( + 0, + ); +} +impl CUgraphInstantiateResult_enum { + ///< Instantiation failed for an unexpected reason which is described in the return value of the function + pub const CUDA_GRAPH_INSTANTIATE_ERROR: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum( + 1, + ); +} +impl CUgraphInstantiateResult_enum { + ///< Instantiation failed due to invalid structure, such as cycles + pub const CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum( + 2, + ); +} +impl CUgraphInstantiateResult_enum { + ///< Instantiation for device launch failed because the graph contained an unsupported operation + pub const CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum( + 3, + ); +} +impl CUgraphInstantiateResult_enum { + ///< Instantiation for device launch failed due to the nodes belonging to different contexts + pub const CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED: CUgraphInstantiateResult_enum = CUgraphInstantiateResult_enum( + 4, + ); +} +#[repr(transparent)] +/// Graph instantiation results +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphInstantiateResult_enum(pub ::core::ffi::c_uint); +/// Graph instantiation results +pub use self::CUgraphInstantiateResult_enum as CUgraphInstantiateResult; +/// Graph instantiation parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_GRAPH_INSTANTIATE_PARAMS_st { + ///< Instantiation flags + pub flags: cuuint64_t, + ///< Upload stream + pub hUploadStream: CUstream, + ///< The node which caused instantiation to fail, if any + pub hErrNode_out: CUgraphNode, + ///< Whether instantiation was successful. 
If it failed, the reason why + pub result_out: CUgraphInstantiateResult, +} +/// Graph instantiation parameters +pub type CUDA_GRAPH_INSTANTIATE_PARAMS = CUDA_GRAPH_INSTANTIATE_PARAMS_st; +impl CUsynchronizationPolicy_enum { + pub const CU_SYNC_POLICY_AUTO: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum( + 1, + ); +} +impl CUsynchronizationPolicy_enum { + pub const CU_SYNC_POLICY_SPIN: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum( + 2, + ); +} +impl CUsynchronizationPolicy_enum { + pub const CU_SYNC_POLICY_YIELD: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum( + 3, + ); +} +impl CUsynchronizationPolicy_enum { + pub const CU_SYNC_POLICY_BLOCKING_SYNC: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum( + 4, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUsynchronizationPolicy_enum(pub ::core::ffi::c_uint); +pub use self::CUsynchronizationPolicy_enum as CUsynchronizationPolicy; +impl CUclusterSchedulingPolicy_enum { + ///< the default policy + pub const CU_CLUSTER_SCHEDULING_POLICY_DEFAULT: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum( + 0, + ); +} +impl CUclusterSchedulingPolicy_enum { + ///< spread the blocks within a cluster to the SMs + pub const CU_CLUSTER_SCHEDULING_POLICY_SPREAD: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum( + 1, + ); +} +impl CUclusterSchedulingPolicy_enum { + ///< allow the hardware to load-balance the blocks in a cluster to the SMs + pub const CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING: CUclusterSchedulingPolicy_enum = CUclusterSchedulingPolicy_enum( + 2, + ); +} +#[repr(transparent)] +/// Cluster scheduling policies. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUclusterSchedulingPolicy_enum(pub ::core::ffi::c_uint); +/// Cluster scheduling policies. These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute +pub use self::CUclusterSchedulingPolicy_enum as CUclusterSchedulingPolicy; +impl CUlaunchMemSyncDomain_enum { + ///< Launch kernels in the default domain + pub const CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT: CUlaunchMemSyncDomain_enum = CUlaunchMemSyncDomain_enum( + 0, + ); +} +impl CUlaunchMemSyncDomain_enum { + ///< Launch kernels in the remote domain + pub const CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE: CUlaunchMemSyncDomain_enum = CUlaunchMemSyncDomain_enum( + 1, + ); +} +#[repr(transparent)] +/** Memory Synchronization Domain + + A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by + that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating + latency increase from memory barriers ordering unrelated traffic. + + By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a + different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream / + graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, ::cuStreamSetAttribute, ::cuLaunchKernelEx, + ::cuGraphKernelNodeSetAttribute. + + Memory operations done in kernels launched in different domains are considered system-scope distanced. 
In other + words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another + memory synchronization domain even if they are on the same GPU.*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchMemSyncDomain_enum(pub ::core::ffi::c_uint); +/** Memory Synchronization Domain + + A kernel can be launched in a specified memory synchronization domain that affects all memory operations issued by + that kernel. A memory barrier issued in one domain will only order memory operations in that domain, thus eliminating + latency increase from memory barriers ordering unrelated traffic. + + By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a + different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream / + graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN, ::cuStreamSetAttribute, ::cuLaunchKernelEx, + ::cuGraphKernelNodeSetAttribute. + + Memory operations done in kernels launched in different domains are considered system-scope distanced. In other + words, a GPU scoped memory synchronization is not sufficient for memory order to be observed by kernels in another + memory synchronization domain even if they are on the same GPU.*/ +pub use self::CUlaunchMemSyncDomain_enum as CUlaunchMemSyncDomain; +/** Memory Synchronization Domain map + + See ::cudaLaunchMemSyncDomain. + + By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a + different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream / + graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. + + Domain ID range is available through ::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchMemSyncDomainMap_st { + ///< The default domain ID to use for designated kernels + pub default_: ::core::ffi::c_uchar, + ///< The remote domain ID to use for designated kernels + pub remote: ::core::ffi::c_uchar, +} +/** Memory Synchronization Domain map + + See ::cudaLaunchMemSyncDomain. + + By default, kernels are launched in domain 0. Kernel launched with ::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE will have a + different domain ID. User may also alter the domain ID with ::CUlaunchMemSyncDomainMap for a specific stream / + graph node / kernel launch. See ::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. + + Domain ID range is available through ::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT.*/ +pub type CUlaunchMemSyncDomainMap = CUlaunchMemSyncDomainMap_st; +impl CUlaunchAttributeID_enum { + ///< Ignored entry, for convenient composition + pub const CU_LAUNCH_ATTRIBUTE_IGNORE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 0, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for streams, graph nodes, launches. See +::CUlaunchAttributeValue::accessPolicyWindow.*/ + pub const CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 1, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for graph nodes, launches. See +::CUlaunchAttributeValue::cooperative.*/ + pub const CU_LAUNCH_ATTRIBUTE_COOPERATIVE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 2, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for streams. 
See +::CUlaunchAttributeValue::syncPolicy.*/ + pub const CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 3, + ); +} +impl CUlaunchAttributeID_enum { + ///< Valid for graph nodes, launches. See ::CUlaunchAttributeValue::clusterDim. + pub const CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 4, + ); +} +impl CUlaunchAttributeID_enum { + ///< Valid for graph nodes, launches. See ::CUlaunchAttributeValue::clusterSchedulingPolicyPreference. + pub const CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 5, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for launches. Setting +::CUlaunchAttributeValue::programmaticStreamSerializationAllowed +to non-0 signals that the kernel will use programmatic +means to resolve its stream dependency, so that the +CUDA runtime should opportunistically allow the grid's +execution to overlap with the previous kernel in the +stream, if that kernel requests the overlap. The +dependent launches can choose to wait on the +dependency using the programmatic sync +(cudaGridDependencySynchronize() or equivalent PTX +instructions).*/ + pub const CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 6, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for launches. Set +::CUlaunchAttributeValue::programmaticEvent to +record the event. Event recorded through this +launch attribute is guaranteed to only trigger +after all block in the associated kernel trigger +the event. A block can trigger the event through +PTX launchdep.release or CUDA builtin function +cudaTriggerProgrammaticLaunchCompletion(). A +trigger can also be inserted at the beginning of +each block's execution if triggerAtBlockStart is +set to non-0. The dependent launches can choose to +wait on the dependency using the programmatic sync +(cudaGridDependencySynchronize() or equivalent PTX +instructions). Note that dependents (including the +CPU thread calling cuEventSynchronize()) are not +guaranteed to observe the release precisely when +it is released. For example, cuEventSynchronize() +may only observe the event trigger long after the +associated kernel has completed. This recording +type is primarily meant for establishing +programmatic dependency between device tasks. Note +also this type of dependency allows, but does not +guarantee, concurrent execution of tasks. +
+The event supplied must not be an interprocess or +interop event. The event must disable timing (i.e. +must be created with the ::CU_EVENT_DISABLE_TIMING +flag set).*/ + pub const CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 7, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for streams, graph nodes, launches. See +::CUlaunchAttributeValue::priority.*/ + pub const CU_LAUNCH_ATTRIBUTE_PRIORITY: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 8, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for streams, graph nodes, launches. See +::CUlaunchAttributeValue::memSyncDomainMap.*/ + pub const CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 9, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for streams, graph nodes, launches. See +::CUlaunchAttributeValue::memSyncDomain.*/ + pub const CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 10, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for launches. Set +::CUlaunchAttributeValue::launchCompletionEvent to record the +event. +
+Nominally, the event is triggered once all blocks of the kernel +have begun execution. Currently this is a best effort. If a kernel +B has a launch completion dependency on a kernel A, B may wait +until A is complete. Alternatively, blocks of B may begin before +all blocks of A have begun, for example if B can claim execution +resources unavailable to A (e.g. they run on different GPUs) or +if B is a higher priority than A. +Exercise caution if such an ordering inversion could lead +to deadlock. +
+A launch completion event is nominally similar to a programmatic +event with \c triggerAtBlockStart set except that it is not +visible to \c cudaGridDependencySynchronize() and can be used with +compute capability less than 9.0. +
+The event supplied must not be an interprocess or interop +event. The event must disable timing (i.e. must be created +with the ::CU_EVENT_DISABLE_TIMING flag set).*/ + pub const CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 12, + ); +} +impl CUlaunchAttributeID_enum { + /**< Valid for graph nodes, launches. This attribute is graphs-only, +and passing it to a launch in a non-capturing stream will result +in an error. +
+::CUlaunchAttributeValue::deviceUpdatableKernelNode::deviceUpdatable can +only be set to 0 or 1. Setting the field to 1 indicates that the +corresponding kernel node should be device-updatable. On success, a handle +will be returned via +::CUlaunchAttributeValue::deviceUpdatableKernelNode::devNode which can be +passed to the various device-side update functions to update the node's +kernel parameters from within another kernel. For more information on the +types of device updates that can be made, as well as the relevant limitations +thereof, see ::cudaGraphKernelNodeUpdatesApply. +
+Nodes which are device-updatable have additional restrictions compared to +regular kernel nodes. Firstly, device-updatable nodes cannot be removed +from their graph via ::cuGraphDestroyNode. Additionally, once opted-in +to this functionality, a node cannot opt out, and any attempt to set the +deviceUpdatable attribute to 0 will result in an error. Device-updatable +kernel nodes also cannot have their attributes copied to/from another kernel +node via ::cuGraphKernelNodeCopyAttributes. Graphs containing one or more +device-updatable nodes also do not allow multiple instantiation, and neither +the graph nor its instantiated version can be passed to ::cuGraphExecUpdate. +
+If a graph contains device-updatable nodes and updates those nodes from the device +from within the graph, the graph must be uploaded with ::cuGraphUpload before it +is launched. For such a graph, if host-side executable graph updates are made to the +device-updatable nodes, the graph must be uploaded before it is launched again.*/ + pub const CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 13, + ); +} +impl CUlaunchAttributeID_enum { + pub const CU_LAUNCH_ATTRIBUTE_MAX: CUlaunchAttributeID_enum = CUlaunchAttributeID_enum( + 14, + ); +} +#[repr(transparent)] +/// Launch attributes enum; used as id field of ::CUlaunchAttribute +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchAttributeID_enum(pub ::core::ffi::c_uint); +/// Launch attributes enum; used as id field of ::CUlaunchAttribute +pub use self::CUlaunchAttributeID_enum as CUlaunchAttributeID; +/// Launch attributes union; used as value field of ::CUlaunchAttribute +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUlaunchAttributeValue_union { + pub pad: [::core::ffi::c_char; 64usize], + ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW. + pub accessPolicyWindow: CUaccessPolicyWindow, + /**< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_COOPERATIVE. Nonzero indicates a cooperative +kernel (see ::cuLaunchCooperativeKernel).*/ + pub cooperative: ::core::ffi::c_int, + /**< Value of launch attribute +::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY. ::CUsynchronizationPolicy for +work queued up in this stream*/ + pub syncPolicy: CUsynchronizationPolicy, + pub clusterDim: CUlaunchAttributeValue_union__bindgen_ty_1, + /**< Value of launch attribute +::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE. Cluster +scheduling policy preference for the kernel.*/ + pub clusterSchedulingPolicyPreference: CUclusterSchedulingPolicy, + /**< Value of launch attribute +::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION.*/ + pub programmaticStreamSerializationAllowed: ::core::ffi::c_int, + ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT. + pub programmaticEvent: CUlaunchAttributeValue_union__bindgen_ty_2, + ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT. + pub launchCompletionEvent: CUlaunchAttributeValue_union__bindgen_ty_3, + ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_PRIORITY. Execution priority of the kernel. + pub priority: ::core::ffi::c_int, + /**< Value of launch attribute +::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP. See +::CUlaunchMemSyncDomainMap.*/ + pub memSyncDomainMap: CUlaunchMemSyncDomainMap, + /**< Value of launch attribute +::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN. See::CUlaunchMemSyncDomain*/ + pub memSyncDomain: CUlaunchMemSyncDomain, + ///< Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE. + pub deviceUpdatableKernelNode: CUlaunchAttributeValue_union__bindgen_ty_4, +} +/** Value of launch attribute ::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION that + represents the desired cluster dimensions for the kernel. Opaque type + with the following fields: + - \p x - The X dimension of the cluster, in blocks. Must be a divisor + of the grid X dimension. + - \p y - The Y dimension of the cluster, in blocks. Must be a divisor + of the grid Y dimension. + - \p z - The Z dimension of the cluster, in blocks. 
Must be a divisor + of the grid Z dimension.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchAttributeValue_union__bindgen_ty_1 { + pub x: ::core::ffi::c_uint, + pub y: ::core::ffi::c_uint, + pub z: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchAttributeValue_union__bindgen_ty_2 { + ///< Event to fire when all blocks trigger it + pub event: CUevent, + /**< Event record flags, see ::cuEventRecordWithFlags. Does not accept +::CU_EVENT_RECORD_EXTERNAL.*/ + pub flags: ::core::ffi::c_int, + ///< If this is set to non-0, each block launch will automatically trigger the event + pub triggerAtBlockStart: ::core::ffi::c_int, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchAttributeValue_union__bindgen_ty_3 { + ///< Event to fire when the last block launches + pub event: CUevent, + ///< Event record flags, see ::cuEventRecordWithFlags. Does not accept ::CU_EVENT_RECORD_EXTERNAL. + pub flags: ::core::ffi::c_int, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchAttributeValue_union__bindgen_ty_4 { + ///< Whether or not the resulting kernel node should be device-updatable. + pub deviceUpdatable: ::core::ffi::c_int, + ///< Returns a handle to pass to the various device-side update functions. + pub devNode: CUgraphDeviceNode, +} +/// Launch attributes union; used as value field of ::CUlaunchAttribute +pub type CUlaunchAttributeValue = CUlaunchAttributeValue_union; +/// Launch attribute +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUlaunchAttribute_st { + ///< Attribute to set + pub id: CUlaunchAttributeID, + pub pad: [::core::ffi::c_char; 4usize], + ///< Value of the attribute + pub value: CUlaunchAttributeValue, +} +/// Launch attribute +pub type CUlaunchAttribute = CUlaunchAttribute_st; +/// CUDA extensible launch configuration +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlaunchConfig_st { + ///< Width of grid in blocks + pub gridDimX: ::core::ffi::c_uint, + ///< Height of grid in blocks + pub gridDimY: ::core::ffi::c_uint, + ///< Depth of grid in blocks + pub gridDimZ: ::core::ffi::c_uint, + ///< X dimension of each thread block + pub blockDimX: ::core::ffi::c_uint, + ///< Y dimension of each thread block + pub blockDimY: ::core::ffi::c_uint, + ///< Z dimension of each thread block + pub blockDimZ: ::core::ffi::c_uint, + ///< Dynamic shared-memory size per thread block in bytes + pub sharedMemBytes: ::core::ffi::c_uint, + ///< Stream identifier + pub hStream: CUstream, + ///< List of attributes; nullable if ::CUlaunchConfig::numAttrs == 0 + pub attrs: *mut CUlaunchAttribute, + ///< Number of attributes populated in ::CUlaunchConfig::attrs + pub numAttrs: ::core::ffi::c_uint, +} +/// CUDA extensible launch configuration +pub type CUlaunchConfig = CUlaunchConfig_st; +/// Launch attributes enum; used as id field of ::CUlaunchAttribute +pub use self::CUlaunchAttributeID as CUkernelNodeAttrID; +/// Launch attributes union; used as value field of ::CUlaunchAttribute +pub type CUkernelNodeAttrValue_v1 = CUlaunchAttributeValue; +/// Launch attributes union; used as value field of ::CUlaunchAttribute +pub type CUkernelNodeAttrValue = CUkernelNodeAttrValue_v1; +impl CUstreamCaptureStatus_enum { + ///< Stream is not capturing + pub const CU_STREAM_CAPTURE_STATUS_NONE: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum( + 0, + ); +} +impl CUstreamCaptureStatus_enum { + ///< Stream is actively 
capturing + pub const CU_STREAM_CAPTURE_STATUS_ACTIVE: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum( + 1, + ); +} +impl CUstreamCaptureStatus_enum { + /**< Stream is part of a capture sequence that +has been invalidated, but not terminated*/ + pub const CU_STREAM_CAPTURE_STATUS_INVALIDATED: CUstreamCaptureStatus_enum = CUstreamCaptureStatus_enum( + 2, + ); +} +#[repr(transparent)] +/// Possible stream capture statuses returned by ::cuStreamIsCapturing +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamCaptureStatus_enum(pub ::core::ffi::c_uint); +/// Possible stream capture statuses returned by ::cuStreamIsCapturing +pub use self::CUstreamCaptureStatus_enum as CUstreamCaptureStatus; +impl CUstreamCaptureMode_enum { + pub const CU_STREAM_CAPTURE_MODE_GLOBAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum( + 0, + ); +} +impl CUstreamCaptureMode_enum { + pub const CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum( + 1, + ); +} +impl CUstreamCaptureMode_enum { + pub const CU_STREAM_CAPTURE_MODE_RELAXED: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum( + 2, + ); +} +#[repr(transparent)] +/** Possible modes for stream capture thread interactions. For more details see + ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUstreamCaptureMode_enum(pub ::core::ffi::c_uint); +/// Launch attributes enum; used as id field of ::CUlaunchAttribute +pub use self::CUlaunchAttributeID as CUstreamAttrID; +/** Possible modes for stream capture thread interactions. For more details see + ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode*/ +pub use self::CUstreamCaptureMode_enum as CUstreamCaptureMode; +/// Launch attributes union; used as value field of ::CUlaunchAttribute +pub type CUstreamAttrValue_v1 = CUlaunchAttributeValue; +/// Launch attributes union; used as value field of ::CUlaunchAttribute +pub type CUstreamAttrValue = CUstreamAttrValue_v1; +impl CUdriverProcAddress_flags_enum { + ///< Default search mode for driver symbols. + pub const CU_GET_PROC_ADDRESS_DEFAULT: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum( + 0, + ); +} +impl CUdriverProcAddress_flags_enum { + ///< Search for legacy versions of driver symbols. + pub const CU_GET_PROC_ADDRESS_LEGACY_STREAM: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum( + 1, + ); +} +impl CUdriverProcAddress_flags_enum { + ///< Search for per-thread versions of driver symbols. + pub const CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: CUdriverProcAddress_flags_enum = CUdriverProcAddress_flags_enum( + 2, + ); +} +#[repr(transparent)] +/// Flags to specify search options. For more details see ::cuGetProcAddress +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdriverProcAddress_flags_enum(pub ::core::ffi::c_uint); +/// Flags to specify search options. 
For more details see ::cuGetProcAddress +pub use self::CUdriverProcAddress_flags_enum as CUdriverProcAddress_flags; +impl CUdriverProcAddressQueryResult_enum { + ///< Symbol was succesfully found + pub const CU_GET_PROC_ADDRESS_SUCCESS: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum( + 0, + ); +} +impl CUdriverProcAddressQueryResult_enum { + ///< Symbol was not found in search + pub const CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum( + 1, + ); +} +impl CUdriverProcAddressQueryResult_enum { + ///< Symbol was found but version supplied was not sufficient + pub const CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT: CUdriverProcAddressQueryResult_enum = CUdriverProcAddressQueryResult_enum( + 2, + ); +} +#[repr(transparent)] +/// Flags to indicate search status. For more details see ::cuGetProcAddress +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdriverProcAddressQueryResult_enum(pub ::core::ffi::c_uint); +/// Flags to indicate search status. For more details see ::cuGetProcAddress +pub use self::CUdriverProcAddressQueryResult_enum as CUdriverProcAddressQueryResult; +impl CUexecAffinityType_enum { + ///< Create a context with limited SMs. + pub const CU_EXEC_AFFINITY_TYPE_SM_COUNT: CUexecAffinityType_enum = CUexecAffinityType_enum( + 0, + ); +} +impl CUexecAffinityType_enum { + pub const CU_EXEC_AFFINITY_TYPE_MAX: CUexecAffinityType_enum = CUexecAffinityType_enum( + 1, + ); +} +#[repr(transparent)] +/// Execution Affinity Types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUexecAffinityType_enum(pub ::core::ffi::c_uint); +/// Execution Affinity Types +pub use self::CUexecAffinityType_enum as CUexecAffinityType; +/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUexecAffinitySmCount_st { + ///< The number of SMs the context is limited to use. + pub val: ::core::ffi::c_uint, +} +/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT +pub type CUexecAffinitySmCount_v1 = CUexecAffinitySmCount_st; +/// Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT +pub type CUexecAffinitySmCount = CUexecAffinitySmCount_v1; +/// Execution Affinity Parameters +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUexecAffinityParam_st { + pub type_: CUexecAffinityType, + pub param: CUexecAffinityParam_st__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUexecAffinityParam_st__bindgen_ty_1 { + pub smCount: CUexecAffinitySmCount, +} +/// Execution Affinity Parameters +pub type CUexecAffinityParam_v1 = CUexecAffinityParam_st; +/// Execution Affinity Parameters +pub type CUexecAffinityParam = CUexecAffinityParam_v1; +impl CUlibraryOption_enum { + pub const CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE: CUlibraryOption_enum = CUlibraryOption_enum( + 0, + ); +} +impl CUlibraryOption_enum { + /** Specifes that the argument \p code passed to ::cuLibraryLoadData() will be preserved. + Specifying this option will let the driver know that \p code can be accessed at any point + until ::cuLibraryUnload(). The default behavior is for the driver to allocate and + maintain its own copy of \p code. Note that this is only a memory usage optimization + hint and the driver can choose to ignore it if required. 
+ Specifying this option with ::cuLibraryLoadFromFile() is invalid and + will return ::CUDA_ERROR_INVALID_VALUE.*/ + pub const CU_LIBRARY_BINARY_IS_PRESERVED: CUlibraryOption_enum = CUlibraryOption_enum( + 1, + ); +} +impl CUlibraryOption_enum { + /** Specifes that the argument \p code passed to ::cuLibraryLoadData() will be preserved. + Specifying this option will let the driver know that \p code can be accessed at any point + until ::cuLibraryUnload(). The default behavior is for the driver to allocate and + maintain its own copy of \p code. Note that this is only a memory usage optimization + hint and the driver can choose to ignore it if required. + Specifying this option with ::cuLibraryLoadFromFile() is invalid and + will return ::CUDA_ERROR_INVALID_VALUE.*/ + pub const CU_LIBRARY_NUM_OPTIONS: CUlibraryOption_enum = CUlibraryOption_enum(2); +} +#[repr(transparent)] +/// Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile() +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlibraryOption_enum(pub ::core::ffi::c_uint); +/// Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile() +pub use self::CUlibraryOption_enum as CUlibraryOption; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUlibraryHostUniversalFunctionAndDataTable_st { + pub functionTable: *mut ::core::ffi::c_void, + pub functionWindowSize: usize, + pub dataTable: *mut ::core::ffi::c_void, + pub dataWindowSize: usize, +} +pub type CUlibraryHostUniversalFunctionAndDataTable = CUlibraryHostUniversalFunctionAndDataTable_st; +/// Error codes +#[must_use] +pub type cudaError_enum = ::core::ffi::c_uint; +impl CUdevice_P2PAttribute_enum { + ///< A relative value indicating the performance of the link between two devices + pub const CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum( + 1, + ); +} +impl CUdevice_P2PAttribute_enum { + ///< P2P Access is enable + pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum( + 2, + ); +} +impl CUdevice_P2PAttribute_enum { + ///< Atomic operation over the link supported + pub const CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum( + 3, + ); +} +impl CUdevice_P2PAttribute_enum { + ///< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead + pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum( + 4, + ); +} +impl CUdevice_P2PAttribute_enum { + ///< Accessing CUDA arrays over the link supported + pub const CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = CUdevice_P2PAttribute_enum( + 4, + ); +} +#[repr(transparent)] +/// P2P Attributes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdevice_P2PAttribute_enum(pub ::core::ffi::c_uint); +/// P2P Attributes +pub use self::CUdevice_P2PAttribute_enum as CUdevice_P2PAttribute; +/** CUDA stream callback + \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL. + \param status ::CUDA_SUCCESS or any persistent error on the stream. 
+ \param userData User parameter provided at registration.*/ +pub type CUstreamCallback = ::core::option::Option< + unsafe extern "system" fn( + hStream: CUstream, + status: CUresult, + userData: *mut ::core::ffi::c_void, + ), +>; +/** Block size to per-block dynamic shared memory mapping for a certain + kernel \param blockSize Block size of the kernel. + + \return The dynamic shared memory needed by a block.*/ +pub type CUoccupancyB2DSize = ::core::option::Option< + unsafe extern "system" fn(blockSize: ::core::ffi::c_int) -> usize, +>; +/// 2D memory copy parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMCPY2D_st { + ///< Source X in bytes + pub srcXInBytes: usize, + ///< Source Y + pub srcY: usize, + ///< Source memory type (host, device, array) + pub srcMemoryType: CUmemorytype, + ///< Source host pointer + pub srcHost: *const ::core::ffi::c_void, + ///< Source device pointer + pub srcDevice: CUdeviceptr, + ///< Source array reference + pub srcArray: CUarray, + ///< Source pitch (ignored when src is array) + pub srcPitch: usize, + ///< Destination X in bytes + pub dstXInBytes: usize, + ///< Destination Y + pub dstY: usize, + ///< Destination memory type (host, device, array) + pub dstMemoryType: CUmemorytype, + ///< Destination host pointer + pub dstHost: *mut ::core::ffi::c_void, + ///< Destination device pointer + pub dstDevice: CUdeviceptr, + ///< Destination array reference + pub dstArray: CUarray, + ///< Destination pitch (ignored when dst is array) + pub dstPitch: usize, + ///< Width of 2D memory copy in bytes + pub WidthInBytes: usize, + ///< Height of 2D memory copy + pub Height: usize, +} +/// 2D memory copy parameters +pub type CUDA_MEMCPY2D_v2 = CUDA_MEMCPY2D_st; +/// 2D memory copy parameters +pub type CUDA_MEMCPY2D = CUDA_MEMCPY2D_v2; +/// 3D memory copy parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMCPY3D_st { + ///< Source X in bytes + pub srcXInBytes: usize, + ///< Source Y + pub srcY: usize, + ///< Source Z + pub srcZ: usize, + ///< Source LOD + pub srcLOD: usize, + ///< Source memory type (host, device, array) + pub srcMemoryType: CUmemorytype, + ///< Source host pointer + pub srcHost: *const ::core::ffi::c_void, + ///< Source device pointer + pub srcDevice: CUdeviceptr, + ///< Source array reference + pub srcArray: CUarray, + ///< Must be NULL + pub reserved0: *mut ::core::ffi::c_void, + ///< Source pitch (ignored when src is array) + pub srcPitch: usize, + ///< Source height (ignored when src is array; may be 0 if Depth==1) + pub srcHeight: usize, + ///< Destination X in bytes + pub dstXInBytes: usize, + ///< Destination Y + pub dstY: usize, + ///< Destination Z + pub dstZ: usize, + ///< Destination LOD + pub dstLOD: usize, + ///< Destination memory type (host, device, array) + pub dstMemoryType: CUmemorytype, + ///< Destination host pointer + pub dstHost: *mut ::core::ffi::c_void, + ///< Destination device pointer + pub dstDevice: CUdeviceptr, + ///< Destination array reference + pub dstArray: CUarray, + ///< Must be NULL + pub reserved1: *mut ::core::ffi::c_void, + ///< Destination pitch (ignored when dst is array) + pub dstPitch: usize, + ///< Destination height (ignored when dst is array; may be 0 if Depth==1) + pub dstHeight: usize, + ///< Width of 3D memory copy in bytes + pub WidthInBytes: usize, + ///< Height of 3D memory copy + pub Height: usize, + ///< Depth of 3D memory copy + pub Depth: usize, +} +/// 3D memory copy parameters +pub type CUDA_MEMCPY3D_v2 = 
CUDA_MEMCPY3D_st; +/// 3D memory copy parameters +pub type CUDA_MEMCPY3D = CUDA_MEMCPY3D_v2; +/// 3D memory cross-context copy parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMCPY3D_PEER_st { + ///< Source X in bytes + pub srcXInBytes: usize, + ///< Source Y + pub srcY: usize, + ///< Source Z + pub srcZ: usize, + ///< Source LOD + pub srcLOD: usize, + ///< Source memory type (host, device, array) + pub srcMemoryType: CUmemorytype, + ///< Source host pointer + pub srcHost: *const ::core::ffi::c_void, + ///< Source device pointer + pub srcDevice: CUdeviceptr, + ///< Source array reference + pub srcArray: CUarray, + ///< Source context (ignored with srcMemoryType is ::CU_MEMORYTYPE_ARRAY) + pub srcContext: CUcontext, + ///< Source pitch (ignored when src is array) + pub srcPitch: usize, + ///< Source height (ignored when src is array; may be 0 if Depth==1) + pub srcHeight: usize, + ///< Destination X in bytes + pub dstXInBytes: usize, + ///< Destination Y + pub dstY: usize, + ///< Destination Z + pub dstZ: usize, + ///< Destination LOD + pub dstLOD: usize, + ///< Destination memory type (host, device, array) + pub dstMemoryType: CUmemorytype, + ///< Destination host pointer + pub dstHost: *mut ::core::ffi::c_void, + ///< Destination device pointer + pub dstDevice: CUdeviceptr, + ///< Destination array reference + pub dstArray: CUarray, + ///< Destination context (ignored with dstMemoryType is ::CU_MEMORYTYPE_ARRAY) + pub dstContext: CUcontext, + ///< Destination pitch (ignored when dst is array) + pub dstPitch: usize, + ///< Destination height (ignored when dst is array; may be 0 if Depth==1) + pub dstHeight: usize, + ///< Width of 3D memory copy in bytes + pub WidthInBytes: usize, + ///< Height of 3D memory copy + pub Height: usize, + ///< Depth of 3D memory copy + pub Depth: usize, +} +/// 3D memory cross-context copy parameters +pub type CUDA_MEMCPY3D_PEER_v1 = CUDA_MEMCPY3D_PEER_st; +/// 3D memory cross-context copy parameters +pub type CUDA_MEMCPY3D_PEER = CUDA_MEMCPY3D_PEER_v1; +/// Memcpy node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMCPY_NODE_PARAMS_st { + ///< Must be zero + pub flags: ::core::ffi::c_int, + ///< Must be zero + pub reserved: ::core::ffi::c_int, + ///< Context on which to run the node + pub copyCtx: CUcontext, + ///< Parameters for the memory copy + pub copyParams: CUDA_MEMCPY3D, +} +/// Memcpy node parameters +pub type CUDA_MEMCPY_NODE_PARAMS = CUDA_MEMCPY_NODE_PARAMS_st; +/// Array descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY_DESCRIPTOR_st { + ///< Width of array + pub Width: usize, + ///< Height of array + pub Height: usize, + ///< Array format + pub Format: CUarray_format, + ///< Channels per array element + pub NumChannels: ::core::ffi::c_uint, +} +/// Array descriptor +pub type CUDA_ARRAY_DESCRIPTOR_v2 = CUDA_ARRAY_DESCRIPTOR_st; +/// Array descriptor +pub type CUDA_ARRAY_DESCRIPTOR = CUDA_ARRAY_DESCRIPTOR_v2; +/// 3D array descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY3D_DESCRIPTOR_st { + ///< Width of 3D array + pub Width: usize, + ///< Height of 3D array + pub Height: usize, + ///< Depth of 3D array + pub Depth: usize, + ///< Array format + pub Format: CUarray_format, + ///< Channels per array element + pub NumChannels: ::core::ffi::c_uint, + ///< Flags + pub Flags: ::core::ffi::c_uint, +} +/// 3D array descriptor +pub type CUDA_ARRAY3D_DESCRIPTOR_v2 = 
CUDA_ARRAY3D_DESCRIPTOR_st; +/// 3D array descriptor +pub type CUDA_ARRAY3D_DESCRIPTOR = CUDA_ARRAY3D_DESCRIPTOR_v2; +/// CUDA array sparse properties +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st { + pub tileExtent: CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1, + /// First mip level at which the mip tail begins. + pub miptailFirstLevel: ::core::ffi::c_uint, + /// Total size of the mip tail. + pub miptailSize: ::core::ffi::c_ulonglong, + /// Flags will either be zero or ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 4usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 { + ///< Width of sparse tile in elements + pub width: ::core::ffi::c_uint, + ///< Height of sparse tile in elements + pub height: ::core::ffi::c_uint, + ///< Depth of sparse tile in elements + pub depth: ::core::ffi::c_uint, +} +/// CUDA array sparse properties +pub type CUDA_ARRAY_SPARSE_PROPERTIES_v1 = CUDA_ARRAY_SPARSE_PROPERTIES_st; +/// CUDA array sparse properties +pub type CUDA_ARRAY_SPARSE_PROPERTIES = CUDA_ARRAY_SPARSE_PROPERTIES_v1; +/// CUDA array memory requirements +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st { + ///< Total required memory size + pub size: usize, + ///< alignment requirement + pub alignment: usize, + pub reserved: [::core::ffi::c_uint; 4usize], +} +/// CUDA array memory requirements +pub type CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 = CUDA_ARRAY_MEMORY_REQUIREMENTS_st; +/// CUDA array memory requirements +pub type CUDA_ARRAY_MEMORY_REQUIREMENTS = CUDA_ARRAY_MEMORY_REQUIREMENTS_v1; +/// CUDA Resource descriptor +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_RESOURCE_DESC_st { + ///< Resource type + pub resType: CUresourcetype, + pub res: CUDA_RESOURCE_DESC_st__bindgen_ty_1, + ///< Flags (must be zero) + pub flags: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUDA_RESOURCE_DESC_st__bindgen_ty_1 { + pub array: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1, + pub mipmap: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2, + pub linear: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3, + pub pitch2D: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4, + pub reserved: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + ///< CUDA array + pub hArray: CUarray, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { + ///< CUDA mipmapped array + pub hMipmappedArray: CUmipmappedArray, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { + ///< Device pointer + pub devPtr: CUdeviceptr, + ///< Array format + pub format: CUarray_format, + ///< Channels per array element + pub numChannels: ::core::ffi::c_uint, + ///< Size in bytes + pub sizeInBytes: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { + ///< Device pointer + pub devPtr: CUdeviceptr, + ///< Array format + pub format: CUarray_format, + ///< Channels per array element + pub numChannels: ::core::ffi::c_uint, + ///< Width of the array in elements + pub width: usize, + ///< Height of 
the array in elements + pub height: usize, + ///< Pitch between two rows in bytes + pub pitchInBytes: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 { + pub reserved: [::core::ffi::c_int; 32usize], +} +/// CUDA Resource descriptor +pub type CUDA_RESOURCE_DESC_v1 = CUDA_RESOURCE_DESC_st; +/// CUDA Resource descriptor +pub type CUDA_RESOURCE_DESC = CUDA_RESOURCE_DESC_v1; +/// Texture descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct CUDA_TEXTURE_DESC_st { + ///< Address modes + pub addressMode: [CUaddress_mode; 3usize], + ///< Filter mode + pub filterMode: CUfilter_mode, + ///< Flags + pub flags: ::core::ffi::c_uint, + ///< Maximum anisotropy ratio + pub maxAnisotropy: ::core::ffi::c_uint, + ///< Mipmap filter mode + pub mipmapFilterMode: CUfilter_mode, + ///< Mipmap level bias + pub mipmapLevelBias: f32, + ///< Mipmap minimum level clamp + pub minMipmapLevelClamp: f32, + ///< Mipmap maximum level clamp + pub maxMipmapLevelClamp: f32, + ///< Border Color + pub borderColor: [f32; 4usize], + pub reserved: [::core::ffi::c_int; 12usize], +} +/// Texture descriptor +pub type CUDA_TEXTURE_DESC_v1 = CUDA_TEXTURE_DESC_st; +/// Texture descriptor +pub type CUDA_TEXTURE_DESC = CUDA_TEXTURE_DESC_v1; +impl CUresourceViewFormat_enum { + ///< No resource view format (use underlying resource format) + pub const CU_RES_VIEW_FORMAT_NONE: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 0, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel unsigned 8-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 1, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel unsigned 8-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 2, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel unsigned 8-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 3, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel signed 8-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 4, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel signed 8-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 5, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel signed 8-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 6, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel unsigned 16-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 7, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel unsigned 16-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 8, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel unsigned 16-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 9, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel signed 16-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 10, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel signed 16-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 11, + ); +} 
+impl CUresourceViewFormat_enum { + ///< 4 channel signed 16-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 12, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel unsigned 32-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 13, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel unsigned 32-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 14, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel unsigned 32-bit integers + pub const CU_RES_VIEW_FORMAT_UINT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 15, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel signed 32-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 16, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel signed 32-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 17, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel signed 32-bit integers + pub const CU_RES_VIEW_FORMAT_SINT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 18, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel 16-bit floating point + pub const CU_RES_VIEW_FORMAT_FLOAT_1X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 19, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel 16-bit floating point + pub const CU_RES_VIEW_FORMAT_FLOAT_2X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 20, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel 16-bit floating point + pub const CU_RES_VIEW_FORMAT_FLOAT_4X16: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 21, + ); +} +impl CUresourceViewFormat_enum { + ///< 1 channel 32-bit floating point + pub const CU_RES_VIEW_FORMAT_FLOAT_1X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 22, + ); +} +impl CUresourceViewFormat_enum { + ///< 2 channel 32-bit floating point + pub const CU_RES_VIEW_FORMAT_FLOAT_2X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 23, + ); +} +impl CUresourceViewFormat_enum { + ///< 4 channel 32-bit floating point + pub const CU_RES_VIEW_FORMAT_FLOAT_4X32: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 24, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 1 + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC1: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 25, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 2 + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC2: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 26, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 3 + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC3: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 27, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 4 unsigned + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC4: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 28, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 4 signed + pub const CU_RES_VIEW_FORMAT_SIGNED_BC4: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 29, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 5 unsigned + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC5: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 30, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 5 signed + pub 
const CU_RES_VIEW_FORMAT_SIGNED_BC5: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 31, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 6 unsigned half-float + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC6H: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 32, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 6 signed half-float + pub const CU_RES_VIEW_FORMAT_SIGNED_BC6H: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 33, + ); +} +impl CUresourceViewFormat_enum { + ///< Block compressed 7 + pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC7: CUresourceViewFormat_enum = CUresourceViewFormat_enum( + 34, + ); +} +#[repr(transparent)] +/// Resource view format +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUresourceViewFormat_enum(pub ::core::ffi::c_uint); +/// Resource view format +pub use self::CUresourceViewFormat_enum as CUresourceViewFormat; +/// Resource view descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_RESOURCE_VIEW_DESC_st { + ///< Resource view format + pub format: CUresourceViewFormat, + ///< Width of the resource view + pub width: usize, + ///< Height of the resource view + pub height: usize, + ///< Depth of the resource view + pub depth: usize, + ///< First defined mipmap level + pub firstMipmapLevel: ::core::ffi::c_uint, + ///< Last defined mipmap level + pub lastMipmapLevel: ::core::ffi::c_uint, + ///< First layer index + pub firstLayer: ::core::ffi::c_uint, + ///< Last layer index + pub lastLayer: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +/// Resource view descriptor +pub type CUDA_RESOURCE_VIEW_DESC_v1 = CUDA_RESOURCE_VIEW_DESC_st; +/// Resource view descriptor +pub type CUDA_RESOURCE_VIEW_DESC = CUDA_RESOURCE_VIEW_DESC_v1; +/// Tensor map descriptor. Requires compiler support for aligning to 64 bytes. +#[repr(C)] +#[repr(align(64))] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUtensorMap_st { + pub opaque: [cuuint64_t; 16usize], +} +/// Tensor map descriptor. Requires compiler support for aligning to 64 bytes. 
+pub type CUtensorMap = CUtensorMap_st; +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_UINT8: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 0, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_UINT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 1, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_UINT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 2, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_INT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 3, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_UINT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 4, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_INT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 5, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 6, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 7, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT64: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 8, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_BFLOAT16: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 9, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 10, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_TFLOAT32: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 11, + ); +} +impl CUtensorMapDataType_enum { + pub const CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ: CUtensorMapDataType_enum = CUtensorMapDataType_enum( + 12, + ); +} +#[repr(transparent)] +/// Tensor map data type +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUtensorMapDataType_enum(pub ::core::ffi::c_uint); +/// Tensor map data type +pub use self::CUtensorMapDataType_enum as CUtensorMapDataType; +impl CUtensorMapInterleave_enum { + pub const CU_TENSOR_MAP_INTERLEAVE_NONE: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum( + 0, + ); +} +impl CUtensorMapInterleave_enum { + pub const CU_TENSOR_MAP_INTERLEAVE_16B: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum( + 1, + ); +} +impl CUtensorMapInterleave_enum { + pub const CU_TENSOR_MAP_INTERLEAVE_32B: CUtensorMapInterleave_enum = CUtensorMapInterleave_enum( + 2, + ); +} +#[repr(transparent)] +/// Tensor map interleave layout type +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUtensorMapInterleave_enum(pub ::core::ffi::c_uint); +/// Tensor map interleave layout type +pub use self::CUtensorMapInterleave_enum as CUtensorMapInterleave; +impl CUtensorMapSwizzle_enum { + pub const CU_TENSOR_MAP_SWIZZLE_NONE: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum( + 0, + ); +} +impl CUtensorMapSwizzle_enum { + pub const CU_TENSOR_MAP_SWIZZLE_32B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum( + 1, + ); +} +impl CUtensorMapSwizzle_enum { + pub const CU_TENSOR_MAP_SWIZZLE_64B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum( + 2, + ); +} +impl CUtensorMapSwizzle_enum { + pub const CU_TENSOR_MAP_SWIZZLE_128B: CUtensorMapSwizzle_enum = CUtensorMapSwizzle_enum( + 3, + ); +} +#[repr(transparent)] +/// Tensor map swizzling mode of shared memory banks +#[derive(Debug, Copy, Clone, Hash, 
PartialEq, Eq)] +pub struct CUtensorMapSwizzle_enum(pub ::core::ffi::c_uint); +/// Tensor map swizzling mode of shared memory banks +pub use self::CUtensorMapSwizzle_enum as CUtensorMapSwizzle; +impl CUtensorMapL2promotion_enum { + pub const CU_TENSOR_MAP_L2_PROMOTION_NONE: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum( + 0, + ); +} +impl CUtensorMapL2promotion_enum { + pub const CU_TENSOR_MAP_L2_PROMOTION_L2_64B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum( + 1, + ); +} +impl CUtensorMapL2promotion_enum { + pub const CU_TENSOR_MAP_L2_PROMOTION_L2_128B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum( + 2, + ); +} +impl CUtensorMapL2promotion_enum { + pub const CU_TENSOR_MAP_L2_PROMOTION_L2_256B: CUtensorMapL2promotion_enum = CUtensorMapL2promotion_enum( + 3, + ); +} +#[repr(transparent)] +/// Tensor map L2 promotion type +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUtensorMapL2promotion_enum(pub ::core::ffi::c_uint); +/// Tensor map L2 promotion type +pub use self::CUtensorMapL2promotion_enum as CUtensorMapL2promotion; +impl CUtensorMapFloatOOBfill_enum { + pub const CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE: CUtensorMapFloatOOBfill_enum = CUtensorMapFloatOOBfill_enum( + 0, + ); +} +impl CUtensorMapFloatOOBfill_enum { + pub const CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA: CUtensorMapFloatOOBfill_enum = CUtensorMapFloatOOBfill_enum( + 1, + ); +} +#[repr(transparent)] +/// Tensor map out-of-bounds fill type +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUtensorMapFloatOOBfill_enum(pub ::core::ffi::c_uint); +/// Tensor map out-of-bounds fill type +pub use self::CUtensorMapFloatOOBfill_enum as CUtensorMapFloatOOBfill; +/// GPU Direct v3 tokens +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { + pub p2pToken: ::core::ffi::c_ulonglong, + pub vaSpaceToken: ::core::ffi::c_uint, +} +/// GPU Direct v3 tokens +pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st; +/// GPU Direct v3 tokens +pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1; +impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { + ///< No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations + pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum( + 0, + ); +} +impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { + ///< Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. 
+ pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum( + 1, + ); +} +impl CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { + ///< Read-write access, the device has full read-write access to the memory + pub const CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE: CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum = CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum( + 3, + ); +} +#[repr(transparent)] +/** Access flags that specify the level of access the current context's device has + on the memory referenced.*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum(pub ::core::ffi::c_uint); +/** Access flags that specify the level of access the current context's device has + on the memory referenced.*/ +pub use self::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum as CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS; +/// Kernel launch parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_LAUNCH_PARAMS_st { + ///< Kernel to launch + pub function: CUfunction, + ///< Width of grid in blocks + pub gridDimX: ::core::ffi::c_uint, + ///< Height of grid in blocks + pub gridDimY: ::core::ffi::c_uint, + ///< Depth of grid in blocks + pub gridDimZ: ::core::ffi::c_uint, + ///< X dimension of each thread block + pub blockDimX: ::core::ffi::c_uint, + ///< Y dimension of each thread block + pub blockDimY: ::core::ffi::c_uint, + ///< Z dimension of each thread block + pub blockDimZ: ::core::ffi::c_uint, + ///< Dynamic shared-memory size per thread block in bytes + pub sharedMemBytes: ::core::ffi::c_uint, + ///< Stream identifier + pub hStream: CUstream, + ///< Array of pointers to kernel parameters + pub kernelParams: *mut *mut ::core::ffi::c_void, +} +/// Kernel launch parameters +pub type CUDA_LAUNCH_PARAMS_v1 = CUDA_LAUNCH_PARAMS_st; +/// Kernel launch parameters +pub type CUDA_LAUNCH_PARAMS = CUDA_LAUNCH_PARAMS_v1; +impl CUexternalMemoryHandleType_enum { + /// Handle is an opaque file descriptor + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 1, + ); +} +impl CUexternalMemoryHandleType_enum { + /// Handle is an opaque shared NT handle + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 2, + ); +} +impl CUexternalMemoryHandleType_enum { + /// Handle is an opaque, globally shared handle + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 3, + ); +} +impl CUexternalMemoryHandleType_enum { + /// Handle is a D3D12 heap object + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 4, + ); +} +impl CUexternalMemoryHandleType_enum { + /// Handle is a D3D12 committed resource + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 5, + ); +} +impl CUexternalMemoryHandleType_enum { + /// Handle is a shared NT handle to a D3D11 resource + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 6, + ); +} +impl CUexternalMemoryHandleType_enum { + /// Handle is a globally shared handle to a D3D11 resource + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 7, + ); +} +impl 
CUexternalMemoryHandleType_enum { + /// Handle is an NvSciBuf object + pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF: CUexternalMemoryHandleType_enum = CUexternalMemoryHandleType_enum( + 8, + ); +} +#[repr(transparent)] +/// External memory handle types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUexternalMemoryHandleType_enum(pub ::core::ffi::c_uint); +/// External memory handle types +pub use self::CUexternalMemoryHandleType_enum as CUexternalMemoryHandleType; +/// External memory handle descriptor +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { + /// Type of the handle + pub type_: CUexternalMemoryHandleType, + pub handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1, + /// Size of the memory allocation + pub size: ::core::ffi::c_ulonglong, + /// Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 { + /** File descriptor referencing the memory object. Valid + when type is + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD*/ + pub fd: ::core::ffi::c_int, + pub win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, + /** A handle representing an NvSciBuf Object. Valid when type + is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF*/ + pub nvSciBufObject: *const ::core::ffi::c_void, +} +/** Win32 handle referencing the semaphore object. Valid when + type is one of the following: + - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 + - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP + - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + Exactly one of 'handle' and 'name' must be non-NULL. If + type is one of the following: + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + then 'name' must be NULL.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + /// Valid NT handle. Must be NULL if 'name' is non-NULL + pub handle: *mut ::core::ffi::c_void, + /** Name of a valid memory object. + Must be NULL if 'handle' is non-NULL.*/ + pub name: *const ::core::ffi::c_void, +} +/// External memory handle descriptor +pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st; +/// External memory handle descriptor +pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1; +/// External memory buffer descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { + /// Offset into the memory object where the buffer's base is + pub offset: ::core::ffi::c_ulonglong, + /// Size of the buffer + pub size: ::core::ffi::c_ulonglong, + /// Flags reserved for future use. Must be zero. 
+ pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +/// External memory buffer descriptor +pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st; +/// External memory buffer descriptor +pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1; +/// External memory mipmap descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { + /** Offset into the memory object where the base level of the + mipmap chain is.*/ + pub offset: ::core::ffi::c_ulonglong, + /// Format, dimension and type of base level of the mipmap chain + pub arrayDesc: CUDA_ARRAY3D_DESCRIPTOR, + /// Total number of levels in the mipmap chain + pub numLevels: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +/// External memory mipmap descriptor +pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st; +/// External memory mipmap descriptor +pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1; +impl CUexternalSemaphoreHandleType_enum { + /// Handle is an opaque file descriptor + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 1, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is an opaque shared NT handle + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 2, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is an opaque, globally shared handle + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 3, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is a shared NT handle referencing a D3D12 fence object + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 4, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is a shared NT handle referencing a D3D11 fence object + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 5, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Opaque handle to NvSciSync Object + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 6, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is a shared NT handle referencing a D3D11 keyed mutex object + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 7, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is a globally shared handle referencing a D3D11 keyed mutex object + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 8, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is an opaque file descriptor referencing a timeline semaphore + pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 9, + ); +} +impl CUexternalSemaphoreHandleType_enum { + /// Handle is an opaque shared NT handle referencing a timeline semaphore + pub const 
CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32: CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum( + 10, + ); +} +#[repr(transparent)] +/// External semaphore handle types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUexternalSemaphoreHandleType_enum(pub ::core::ffi::c_uint); +/// External semaphore handle types +pub use self::CUexternalSemaphoreHandleType_enum as CUexternalSemaphoreHandleType; +/// External semaphore handle descriptor +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { + /// Type of the handle + pub type_: CUexternalSemaphoreHandleType, + pub handle: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1, + /// Flags reserved for the future. Must be zero. + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1 { + /** File descriptor referencing the semaphore object. Valid + when type is one of the following: + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD*/ + pub fd: ::core::ffi::c_int, + pub win32: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, + /// Valid NvSciSyncObj. Must be non NULL + pub nvSciSyncObj: *const ::core::ffi::c_void, +} +/** Win32 handle referencing the semaphore object. Valid when + type is one of the following: + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + Exactly one of 'handle' and 'name' must be non-NULL. If + type is one of the following: + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + then 'name' must be NULL.*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + /// Valid NT handle. Must be NULL if 'name' is non-NULL + pub handle: *mut ::core::ffi::c_void, + /** Name of a valid synchronization primitive. + Must be NULL if 'handle' is non-NULL.*/ + pub name: *const ::core::ffi::c_void, +} +/// External semaphore handle descriptor +pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st; +/// External semaphore handle descriptor +pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1; +/// External semaphore signal parameters +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { + pub params: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1, + /** Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to + signal a ::CUexternalSemaphore of type + ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is + ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates + that while signaling the ::CUexternalSemaphore, no memory synchronization + operations should be performed for any external memory object imported + as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. 
+ For all other types of ::CUexternalSemaphore, flags must be zero.*/ + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 { + pub fence: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1, + pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2, + pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3, + pub reserved: [::core::ffi::c_uint; 12usize], +} +/// Parameters for fence objects +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { + /// Value of fence to be signaled + pub value: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2 { + /** Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType + is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.*/ + pub fence: *mut ::core::ffi::c_void, + pub reserved: ::core::ffi::c_ulonglong, +} +/// Parameters for keyed mutex objects +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { + /// Value of key to release the mutex with + pub key: ::core::ffi::c_ulonglong, +} +/// External semaphore signal parameters +pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st; +/// External semaphore signal parameters +pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1; +/// External semaphore wait parameters +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { + pub params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1, + /** Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on + a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + which indicates that while waiting for the ::CUexternalSemaphore, no memory + synchronization operations should be performed for any external memory + object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. + For all other types of ::CUexternalSemaphore, flags must be zero.*/ + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 { + pub fence: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1, + pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2, + pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3, + pub reserved: [::core::ffi::c_uint; 10usize], +} +/// Parameters for fence objects +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { + /// Value of fence to be waited on + pub value: ::core::ffi::c_ulonglong, +} +/** Pointer to NvSciSyncFence. 
Valid if CUexternalSemaphoreHandleType + is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC.*/ +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2 { + pub fence: *mut ::core::ffi::c_void, + pub reserved: ::core::ffi::c_ulonglong, +} +/// Parameters for keyed mutex objects +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { + /// Value of key to acquire the mutex with + pub key: ::core::ffi::c_ulonglong, + /// Timeout in milliseconds to wait to acquire the mutex + pub timeoutMs: ::core::ffi::c_uint, +} +/// External semaphore wait parameters +pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st; +/// External semaphore wait parameters +pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1; +/// Semaphore signal node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st { + ///< Array of external semaphore handles. + pub extSemArray: *mut CUexternalSemaphore, + ///< Array of external semaphore signal parameters. + pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, + ///< Number of handles and parameters supplied in extSemArray and paramsArray. + pub numExtSems: ::core::ffi::c_uint, +} +/// Semaphore signal node parameters +pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st; +/// Semaphore signal node parameters +pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1; +/// Semaphore signal node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st { + ///< Array of external semaphore handles. + pub extSemArray: *mut CUexternalSemaphore, + ///< Array of external semaphore signal parameters. + pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, + ///< Number of handles and parameters supplied in extSemArray and paramsArray. + pub numExtSems: ::core::ffi::c_uint, +} +/// Semaphore signal node parameters +pub type CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2 = CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st; +/// Semaphore wait node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st { + ///< Array of external semaphore handles. + pub extSemArray: *mut CUexternalSemaphore, + ///< Array of external semaphore wait parameters. + pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, + ///< Number of handles and parameters supplied in extSemArray and paramsArray. + pub numExtSems: ::core::ffi::c_uint, +} +/// Semaphore wait node parameters +pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_st; +/// Semaphore wait node parameters +pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1; +/// Semaphore wait node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st { + ///< Array of external semaphore handles. + pub extSemArray: *mut CUexternalSemaphore, + ///< Array of external semaphore wait parameters. + pub paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, + ///< Number of handles and parameters supplied in extSemArray and paramsArray. 
+ pub numExtSems: ::core::ffi::c_uint, +} +/// Semaphore wait node parameters +pub type CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2 = CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st; +pub type CUmemGenericAllocationHandle_v1 = ::core::ffi::c_ulonglong; +pub type CUmemGenericAllocationHandle = CUmemGenericAllocationHandle_v1; +impl CUmemAllocationHandleType_enum { + ///< Does not allow any export mechanism. > + pub const CU_MEM_HANDLE_TYPE_NONE: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum( + 0, + ); +} +impl CUmemAllocationHandleType_enum { + ///< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) + pub const CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum( + 1, + ); +} +impl CUmemAllocationHandleType_enum { + ///< Allows a Win32 NT handle to be used for exporting. (HANDLE) + pub const CU_MEM_HANDLE_TYPE_WIN32: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum( + 2, + ); +} +impl CUmemAllocationHandleType_enum { + ///< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) + pub const CU_MEM_HANDLE_TYPE_WIN32_KMT: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum( + 4, + ); +} +impl CUmemAllocationHandleType_enum { + ///< Allows a fabric handle to be used for exporting. (CUmemFabricHandle) + pub const CU_MEM_HANDLE_TYPE_FABRIC: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum( + 8, + ); +} +impl CUmemAllocationHandleType_enum { + pub const CU_MEM_HANDLE_TYPE_MAX: CUmemAllocationHandleType_enum = CUmemAllocationHandleType_enum( + 2147483647, + ); +} +#[repr(transparent)] +/// Flags for specifying particular handle types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAllocationHandleType_enum(pub ::core::ffi::c_uint); +/// Flags for specifying particular handle types +pub use self::CUmemAllocationHandleType_enum as CUmemAllocationHandleType; +impl CUmemAccess_flags_enum { + ///< Default, make the address range not accessible + pub const CU_MEM_ACCESS_FLAGS_PROT_NONE: CUmemAccess_flags_enum = CUmemAccess_flags_enum( + 0, + ); +} +impl CUmemAccess_flags_enum { + ///< Make the address range read accessible + pub const CU_MEM_ACCESS_FLAGS_PROT_READ: CUmemAccess_flags_enum = CUmemAccess_flags_enum( + 1, + ); +} +impl CUmemAccess_flags_enum { + ///< Make the address range read-write accessible + pub const CU_MEM_ACCESS_FLAGS_PROT_READWRITE: CUmemAccess_flags_enum = CUmemAccess_flags_enum( + 3, + ); +} +impl CUmemAccess_flags_enum { + pub const CU_MEM_ACCESS_FLAGS_PROT_MAX: CUmemAccess_flags_enum = CUmemAccess_flags_enum( + 2147483647, + ); +} +#[repr(transparent)] +/// Specifies the memory protection flags for mapping. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAccess_flags_enum(pub ::core::ffi::c_uint); +/// Specifies the memory protection flags for mapping. 
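Editorial note, not part of the generated bindings: a minimal sketch of how the external-memory descriptors defined above might be populated on the Rust side when importing a POSIX file descriptor. CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD is declared earlier in this generated file; the cuImportExternalMemory / cuExternalMemoryGetMappedBuffer driver entry points that would consume these descriptors are assumed to be bound elsewhere and are not shown here.

    // Sketch only: descriptors for importing an opaque POSIX fd of `bytes` length.
    fn opaque_fd_import(fd: ::core::ffi::c_int, bytes: u64)
        -> (CUDA_EXTERNAL_MEMORY_HANDLE_DESC, CUDA_EXTERNAL_MEMORY_BUFFER_DESC) {
        let handle_desc = CUDA_EXTERNAL_MEMORY_HANDLE_DESC {
            type_: CUexternalMemoryHandleType::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD,
            // Union: only `fd` is meaningful for this handle type.
            handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 { fd: fd },
            size: bytes,
            flags: 0, // must be zero or CUDA_EXTERNAL_MEMORY_DEDICATED
            reserved: [0; 16],
        };
        let buffer_desc = CUDA_EXTERNAL_MEMORY_BUFFER_DESC {
            offset: 0,
            size: bytes,
            flags: 0, // reserved for future use, must be zero
            reserved: [0; 16],
        };
        (handle_desc, buffer_desc)
    }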
+pub use self::CUmemAccess_flags_enum as CUmemAccess_flags; +impl CUmemLocationType_enum { + pub const CU_MEM_LOCATION_TYPE_INVALID: CUmemLocationType_enum = CUmemLocationType_enum( + 0, + ); +} +impl CUmemLocationType_enum { + ///< Location is a device location, thus id is a device ordinal + pub const CU_MEM_LOCATION_TYPE_DEVICE: CUmemLocationType_enum = CUmemLocationType_enum( + 1, + ); +} +impl CUmemLocationType_enum { + ///< Location is host, id is ignored + pub const CU_MEM_LOCATION_TYPE_HOST: CUmemLocationType_enum = CUmemLocationType_enum( + 2, + ); +} +impl CUmemLocationType_enum { + ///< Location is a host NUMA node, thus id is a host NUMA node id + pub const CU_MEM_LOCATION_TYPE_HOST_NUMA: CUmemLocationType_enum = CUmemLocationType_enum( + 3, + ); +} +impl CUmemLocationType_enum { + ///< Location is a host NUMA node of the current thread, id is ignored + pub const CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT: CUmemLocationType_enum = CUmemLocationType_enum( + 4, + ); +} +impl CUmemLocationType_enum { + pub const CU_MEM_LOCATION_TYPE_MAX: CUmemLocationType_enum = CUmemLocationType_enum( + 2147483647, + ); +} +#[repr(transparent)] +/// Specifies the type of location +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemLocationType_enum(pub ::core::ffi::c_uint); +/// Specifies the type of location +pub use self::CUmemLocationType_enum as CUmemLocationType; +impl CUmemAllocationType_enum { + pub const CU_MEM_ALLOCATION_TYPE_INVALID: CUmemAllocationType_enum = CUmemAllocationType_enum( + 0, + ); +} +impl CUmemAllocationType_enum { + /** This allocation type is 'pinned', i.e. cannot migrate from its current + location while the application is actively using it*/ + pub const CU_MEM_ALLOCATION_TYPE_PINNED: CUmemAllocationType_enum = CUmemAllocationType_enum( + 1, + ); +} +impl CUmemAllocationType_enum { + /** This allocation type is 'pinned', i.e. cannot migrate from its current + location while the application is actively using it*/ + pub const CU_MEM_ALLOCATION_TYPE_MAX: CUmemAllocationType_enum = CUmemAllocationType_enum( + 2147483647, + ); +} +#[repr(transparent)] +/// Defines the allocation types available +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAllocationType_enum(pub ::core::ffi::c_uint); +/// Defines the allocation types available +pub use self::CUmemAllocationType_enum as CUmemAllocationType; +impl CUmemAllocationGranularity_flags_enum { + ///< Minimum required granularity for allocation + pub const CU_MEM_ALLOC_GRANULARITY_MINIMUM: CUmemAllocationGranularity_flags_enum = CUmemAllocationGranularity_flags_enum( + 0, + ); +} +impl CUmemAllocationGranularity_flags_enum { + ///< Recommended granularity for allocation for best performance + pub const CU_MEM_ALLOC_GRANULARITY_RECOMMENDED: CUmemAllocationGranularity_flags_enum = CUmemAllocationGranularity_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flag for requesting different optimal and required granularities for an allocation. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAllocationGranularity_flags_enum(pub ::core::ffi::c_uint); +/// Flag for requesting different optimal and required granularities for an allocation. 
+pub use self::CUmemAllocationGranularity_flags_enum as CUmemAllocationGranularity_flags; +impl CUmemRangeHandleType_enum { + pub const CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD: CUmemRangeHandleType_enum = CUmemRangeHandleType_enum( + 1, + ); +} +impl CUmemRangeHandleType_enum { + pub const CU_MEM_RANGE_HANDLE_TYPE_MAX: CUmemRangeHandleType_enum = CUmemRangeHandleType_enum( + 2147483647, + ); +} +#[repr(transparent)] +/// Specifies the handle type for address range +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemRangeHandleType_enum(pub ::core::ffi::c_uint); +/// Specifies the handle type for address range +pub use self::CUmemRangeHandleType_enum as CUmemRangeHandleType; +impl CUarraySparseSubresourceType_enum { + pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL: CUarraySparseSubresourceType_enum = CUarraySparseSubresourceType_enum( + 0, + ); +} +impl CUarraySparseSubresourceType_enum { + pub const CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL: CUarraySparseSubresourceType_enum = CUarraySparseSubresourceType_enum( + 1, + ); +} +#[repr(transparent)] +/// Sparse subresource types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUarraySparseSubresourceType_enum(pub ::core::ffi::c_uint); +/// Sparse subresource types +pub use self::CUarraySparseSubresourceType_enum as CUarraySparseSubresourceType; +impl CUmemOperationType_enum { + pub const CU_MEM_OPERATION_TYPE_MAP: CUmemOperationType_enum = CUmemOperationType_enum( + 1, + ); +} +impl CUmemOperationType_enum { + pub const CU_MEM_OPERATION_TYPE_UNMAP: CUmemOperationType_enum = CUmemOperationType_enum( + 2, + ); +} +#[repr(transparent)] +/// Memory operation types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemOperationType_enum(pub ::core::ffi::c_uint); +/// Memory operation types +pub use self::CUmemOperationType_enum as CUmemOperationType; +impl CUmemHandleType_enum { + pub const CU_MEM_HANDLE_TYPE_GENERIC: CUmemHandleType_enum = CUmemHandleType_enum(0); +} +#[repr(transparent)] +/// Memory handle types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemHandleType_enum(pub ::core::ffi::c_uint); +/// Memory handle types +pub use self::CUmemHandleType_enum as CUmemHandleType; +/// Specifies the CUDA array or CUDA mipmapped array memory mapping information +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUarrayMapInfo_st { + ///< Resource type + pub resourceType: CUresourcetype, + pub resource: CUarrayMapInfo_st__bindgen_ty_1, + ///< Sparse subresource type + pub subresourceType: CUarraySparseSubresourceType, + pub subresource: CUarrayMapInfo_st__bindgen_ty_2, + ///< Memory operation type + pub memOperationType: CUmemOperationType, + ///< Memory handle type + pub memHandleType: CUmemHandleType, + pub memHandle: CUarrayMapInfo_st__bindgen_ty_3, + ///< Offset within the memory + pub offset: ::core::ffi::c_ulonglong, + ///< Device ordinal bit mask + pub deviceBitMask: ::core::ffi::c_uint, + ///< flags for future use, must be zero now. + pub flags: ::core::ffi::c_uint, + ///< Reserved for future use, must be zero now. 
+ pub reserved: [::core::ffi::c_uint; 2usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUarrayMapInfo_st__bindgen_ty_1 { + pub mipmap: CUmipmappedArray, + pub array: CUarray, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUarrayMapInfo_st__bindgen_ty_2 { + pub sparseLevel: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1, + pub miptail: CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 { + ///< For CUDA mipmapped arrays must a valid mipmap level. For CUDA arrays must be zero + pub level: ::core::ffi::c_uint, + ///< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero + pub layer: ::core::ffi::c_uint, + ///< Starting X offset in elements + pub offsetX: ::core::ffi::c_uint, + ///< Starting Y offset in elements + pub offsetY: ::core::ffi::c_uint, + ///< Starting Z offset in elements + pub offsetZ: ::core::ffi::c_uint, + ///< Width in elements + pub extentWidth: ::core::ffi::c_uint, + ///< Height in elements + pub extentHeight: ::core::ffi::c_uint, + ///< Depth in elements + pub extentDepth: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 { + ///< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero + pub layer: ::core::ffi::c_uint, + ///< Offset within mip tail + pub offset: ::core::ffi::c_ulonglong, + ///< Extent in bytes + pub size: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUarrayMapInfo_st__bindgen_ty_3 { + pub memHandle: CUmemGenericAllocationHandle, +} +/// Specifies the CUDA array or CUDA mipmapped array memory mapping information +pub type CUarrayMapInfo_v1 = CUarrayMapInfo_st; +/// Specifies the CUDA array or CUDA mipmapped array memory mapping information +pub type CUarrayMapInfo = CUarrayMapInfo_v1; +/// Specifies a memory location. +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemLocation_st { + ///< Specifies the location type, which modifies the meaning of id. + pub type_: CUmemLocationType, + ///< identifier for a given this location's ::CUmemLocationType. + pub id: ::core::ffi::c_int, +} +/// Specifies a memory location. +pub type CUmemLocation_v1 = CUmemLocation_st; +/// Specifies a memory location. +pub type CUmemLocation = CUmemLocation_v1; +impl CUmemAllocationCompType_enum { + ///< Allocating non-compressible memory + pub const CU_MEM_ALLOCATION_COMP_NONE: CUmemAllocationCompType_enum = CUmemAllocationCompType_enum( + 0, + ); +} +impl CUmemAllocationCompType_enum { + ///< Allocating compressible memory + pub const CU_MEM_ALLOCATION_COMP_GENERIC: CUmemAllocationCompType_enum = CUmemAllocationCompType_enum( + 1, + ); +} +#[repr(transparent)] +/// Specifies compression attribute for an allocation. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAllocationCompType_enum(pub ::core::ffi::c_uint); +/// Specifies compression attribute for an allocation. +pub use self::CUmemAllocationCompType_enum as CUmemAllocationCompType; +/// Specifies the allocation properties for a allocation. 
+#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAllocationProp_st { + /// Allocation type + pub type_: CUmemAllocationType, + /// requested ::CUmemAllocationHandleType + pub requestedHandleTypes: CUmemAllocationHandleType, + /// Location of allocation + pub location: CUmemLocation, + /** Windows-specific POBJECT_ATTRIBUTES required when + ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure + includes security attributes that define + the scope of which exported allocations may be transferred to other + processes. In all other cases, this field is required to be zero.*/ + pub win32HandleMetaData: *mut ::core::ffi::c_void, + pub allocFlags: CUmemAllocationProp_st__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAllocationProp_st__bindgen_ty_1 { + /** Allocation hint for requesting compressible memory. + On devices that support Compute Data Compression, compressible + memory can be used to accelerate accesses to data with unstructured + sparsity and other compressible data patterns. Applications are + expected to query allocation property of the handle obtained with + ::cuMemCreate using ::cuMemGetAllocationPropertiesFromHandle to + validate if the obtained allocation is compressible or not. Note that + compressed memory may not be mappable on all devices.*/ + pub compressionType: ::core::ffi::c_uchar, + pub gpuDirectRDMACapable: ::core::ffi::c_uchar, + /// Bitmask indicating intended usage for this allocation + pub usage: ::core::ffi::c_ushort, + pub reserved: [::core::ffi::c_uchar; 4usize], +} +/// Specifies the allocation properties for a allocation. +pub type CUmemAllocationProp_v1 = CUmemAllocationProp_st; +/// Specifies the allocation properties for a allocation. +pub type CUmemAllocationProp = CUmemAllocationProp_v1; +impl CUmulticastGranularity_flags_enum { + ///< Minimum required granularity + pub const CU_MULTICAST_GRANULARITY_MINIMUM: CUmulticastGranularity_flags_enum = CUmulticastGranularity_flags_enum( + 0, + ); +} +impl CUmulticastGranularity_flags_enum { + ///< Recommended granularity for best performance + pub const CU_MULTICAST_GRANULARITY_RECOMMENDED: CUmulticastGranularity_flags_enum = CUmulticastGranularity_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flags for querying different granularities for a multicast object +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmulticastGranularity_flags_enum(pub ::core::ffi::c_uint); +/// Flags for querying different granularities for a multicast object +pub use self::CUmulticastGranularity_flags_enum as CUmulticastGranularity_flags; +/// Specifies the properties for a multicast object. +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmulticastObjectProp_st { + /** The number of devices in the multicast team that will bind memory to this + object*/ + pub numDevices: ::core::ffi::c_uint, + /** The maximum amount of memory that can be bound to this multicast object + per device*/ + pub size: usize, + /** Bitmask of exportable handle types (see ::CUmemAllocationHandleType) for + this object*/ + pub handleTypes: ::core::ffi::c_ulonglong, + /// Flags for future use, must be zero now + pub flags: ::core::ffi::c_ulonglong, +} +/// Specifies the properties for a multicast object. +pub type CUmulticastObjectProp_v1 = CUmulticastObjectProp_st; +/// Specifies the properties for a multicast object. 
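Editorial note, not part of the generated file: a sketch of filling in the CUmemAllocationProp structure defined above for a pinned, non-exportable device allocation. In the driver API flow this property block is passed to cuMemCreate, and the returned handle is then mapped into a reserved address range and made accessible through a CUmemAccessDesc (declared just below); the bindings for those calls are assumed to live elsewhere in this file.

    // Sketch only: pinned allocation properties for device `ordinal`, no export handle.
    fn pinned_device_alloc_prop(ordinal: ::core::ffi::c_int) -> CUmemAllocationProp {
        CUmemAllocationProp {
            type_: CUmemAllocationType::CU_MEM_ALLOCATION_TYPE_PINNED,
            requestedHandleTypes: CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_NONE,
            location: CUmemLocation {
                type_: CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE,
                id: ordinal,
            },
            win32HandleMetaData: ::core::ptr::null_mut(),
            allocFlags: CUmemAllocationProp_st__bindgen_ty_1 {
                compressionType: 0, // CU_MEM_ALLOCATION_COMP_NONE
                gpuDirectRDMACapable: 0,
                usage: 0,
                reserved: [0; 4],
            },
        }
    }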
+pub type CUmulticastObjectProp = CUmulticastObjectProp_v1; +/// Memory access descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemAccessDesc_st { + ///< Location on which the request is to change it's accessibility + pub location: CUmemLocation, + ///< ::CUmemProt accessibility flags to set on the request + pub flags: CUmemAccess_flags, +} +/// Memory access descriptor +pub type CUmemAccessDesc_v1 = CUmemAccessDesc_st; +/// Memory access descriptor +pub type CUmemAccessDesc = CUmemAccessDesc_v1; +impl CUgraphExecUpdateResult_enum { + ///< The update succeeded + pub const CU_GRAPH_EXEC_UPDATE_SUCCESS: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 0, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed for an unexpected reason which is described in the return value of the function + pub const CU_GRAPH_EXEC_UPDATE_ERROR: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 1, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because the topology changed + pub const CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 2, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because a node type changed + pub const CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 3, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because the function of a kernel node changed (CUDA driver < 11.2) + pub const CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 4, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because the parameters changed in a way that is not supported + pub const CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 5, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because something about the node is not supported + pub const CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 6, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because the function of a kernel node changed in an unsupported way + pub const CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 7, + ); +} +impl CUgraphExecUpdateResult_enum { + ///< The update failed because the node attributes changed in a way that is not supported + pub const CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED: CUgraphExecUpdateResult_enum = CUgraphExecUpdateResult_enum( + 8, + ); +} +#[repr(transparent)] +/// CUDA Graph Update error types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphExecUpdateResult_enum(pub ::core::ffi::c_uint); +/// CUDA Graph Update error types +pub use self::CUgraphExecUpdateResult_enum as CUgraphExecUpdateResult; +/// Result information returned by cuGraphExecUpdate +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphExecUpdateResultInfo_st { + /// Gives more specific detail when a cuda graph update fails. + pub result: CUgraphExecUpdateResult, + /** The "to node" of the error edge when the topologies do not match. + The error node when the error is associated with a specific node. + NULL when the error is generic.*/ + pub errorNode: CUgraphNode, + /// The from node of error edge when the topologies do not match. Otherwise NULL. 
+ pub errorFromNode: CUgraphNode, +} +/// Result information returned by cuGraphExecUpdate +pub type CUgraphExecUpdateResultInfo_v1 = CUgraphExecUpdateResultInfo_st; +/// Result information returned by cuGraphExecUpdate +pub type CUgraphExecUpdateResultInfo = CUgraphExecUpdateResultInfo_v1; +impl CUmemPool_attribute_enum { + /** (value type = int) + Allow cuMemAllocAsync to use memory asynchronously freed + in another streams as long as a stream ordering dependency + of the allocating stream on the free action exists. + Cuda events and null stream interactions can create the required + stream ordered dependencies. (default enabled)*/ + pub const CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 1, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = int) + Allow reuse of already completed frees when there is no dependency + between the free and allocation. (default enabled)*/ + pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 2, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = int) + Allow cuMemAllocAsync to insert new stream dependencies + in order to establish the stream ordering required to reuse + a piece of memory released by cuFreeAsync (default enabled).*/ + pub const CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 3, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = cuuint64_t) + Amount of reserved memory in bytes to hold onto before trying + to release memory back to the OS. When more than the release + threshold bytes of memory are held by the memory pool, the + allocator will try to release memory back to the OS on the + next call to stream, event or context synchronize. (default 0)*/ + pub const CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 4, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = cuuint64_t) + Amount of backing memory currently allocated for the mempool.*/ + pub const CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 5, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = cuuint64_t) + High watermark of backing memory allocated for the mempool since the + last time it was reset. High watermark can only be reset to zero.*/ + pub const CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 6, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = cuuint64_t) + Amount of memory from the pool that is currently in use by the application.*/ + pub const CU_MEMPOOL_ATTR_USED_MEM_CURRENT: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 7, + ); +} +impl CUmemPool_attribute_enum { + /** (value type = cuuint64_t) + High watermark of the amount of memory from the pool that was in use by the application since + the last time it was reset. High watermark can only be reset to zero.*/ + pub const CU_MEMPOOL_ATTR_USED_MEM_HIGH: CUmemPool_attribute_enum = CUmemPool_attribute_enum( + 8, + ); +} +#[repr(transparent)] +/// CUDA memory pool attributes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemPool_attribute_enum(pub ::core::ffi::c_uint); +/// CUDA memory pool attributes +pub use self::CUmemPool_attribute_enum as CUmemPool_attribute; +/// Specifies the properties of allocations made from the pool. 
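Editorial note, not part of the generated file: the pool-properties structure declared immediately below carries a 56-byte reserved tail, so zero-initialising the whole value and then setting the meaningful fields is the simplest way to satisfy its "must be zero" requirements. A minimal sketch, assuming the result would be handed to the driver's cuMemPoolCreate through a binding defined elsewhere in this file.

    // Sketch only: an all-default, non-exportable memory pool on device `ordinal`.
    fn default_pool_props(ordinal: ::core::ffi::c_int) -> CUmemPoolProps {
        // All-zero is a valid bit pattern here: null win32SecurityAttributes,
        // maxSize 0 (system-dependent default), and zeroed reserved bytes.
        let mut props: CUmemPoolProps = unsafe { ::core::mem::zeroed() };
        props.allocType = CUmemAllocationType::CU_MEM_ALLOCATION_TYPE_PINNED;
        props.handleTypes = CUmemAllocationHandleType::CU_MEM_HANDLE_TYPE_NONE;
        props.location = CUmemLocation {
            type_: CUmemLocationType::CU_MEM_LOCATION_TYPE_DEVICE,
            id: ordinal,
        };
        props
    }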
+#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemPoolProps_st { + ///< Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED + pub allocType: CUmemAllocationType, + ///< Handle types that will be supported by allocations from the pool. + pub handleTypes: CUmemAllocationHandleType, + ///< Location where allocations should reside. + pub location: CUmemLocation, + /** Windows-specific LPSECURITYATTRIBUTES required when + ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines + the scope of which exported allocations may be transferred to other + processes. In all other cases, this field is required to be zero.*/ + pub win32SecurityAttributes: *mut ::core::ffi::c_void, + ///< Maximum pool size. When set to 0, defaults to a system dependent value. + pub maxSize: usize, + ///< reserved for future use, must be 0 + pub reserved: [::core::ffi::c_uchar; 56usize], +} +/// Specifies the properties of allocations made from the pool. +pub type CUmemPoolProps_v1 = CUmemPoolProps_st; +/// Specifies the properties of allocations made from the pool. +pub type CUmemPoolProps = CUmemPoolProps_v1; +/// Opaque data for exporting a pool allocation +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmemPoolPtrExportData_st { + pub reserved: [::core::ffi::c_uchar; 64usize], +} +/// Opaque data for exporting a pool allocation +pub type CUmemPoolPtrExportData_v1 = CUmemPoolPtrExportData_st; +/// Opaque data for exporting a pool allocation +pub type CUmemPoolPtrExportData = CUmemPoolPtrExportData_v1; +/// Memory allocation node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEM_ALLOC_NODE_PARAMS_v1_st { + /** in: location where the allocation should reside (specified in ::location). + ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.*/ + pub poolProps: CUmemPoolProps, + ///< in: array of memory access descriptors. Used to describe peer GPU access + pub accessDescs: *const CUmemAccessDesc, + ///< in: number of memory access descriptors. Must not exceed the number of GPUs. + pub accessDescCount: usize, + ///< in: size in bytes of the requested allocation + pub bytesize: usize, + ///< out: address of the allocation returned by CUDA + pub dptr: CUdeviceptr, +} +/// Memory allocation node parameters +pub type CUDA_MEM_ALLOC_NODE_PARAMS_v1 = CUDA_MEM_ALLOC_NODE_PARAMS_v1_st; +/// Memory allocation node parameters +pub type CUDA_MEM_ALLOC_NODE_PARAMS = CUDA_MEM_ALLOC_NODE_PARAMS_v1; +/// Memory allocation node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEM_ALLOC_NODE_PARAMS_v2_st { + /** in: location where the allocation should reside (specified in ::location). + ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported.*/ + pub poolProps: CUmemPoolProps, + ///< in: array of memory access descriptors. Used to describe peer GPU access + pub accessDescs: *const CUmemAccessDesc, + ///< in: number of memory access descriptors. Must not exceed the number of GPUs. 
+ pub accessDescCount: usize, + ///< in: size in bytes of the requested allocation + pub bytesize: usize, + ///< out: address of the allocation returned by CUDA + pub dptr: CUdeviceptr, +} +/// Memory allocation node parameters +pub type CUDA_MEM_ALLOC_NODE_PARAMS_v2 = CUDA_MEM_ALLOC_NODE_PARAMS_v2_st; +/// Memory free node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEM_FREE_NODE_PARAMS_st { + ///< in: the pointer to free + pub dptr: CUdeviceptr, +} +/// Memory free node parameters +pub type CUDA_MEM_FREE_NODE_PARAMS = CUDA_MEM_FREE_NODE_PARAMS_st; +impl CUgraphMem_attribute_enum { + /** (value type = cuuint64_t) + Amount of memory, in bytes, currently associated with graphs*/ + pub const CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum( + 0, + ); +} +impl CUgraphMem_attribute_enum { + /** (value type = cuuint64_t) + High watermark of memory, in bytes, associated with graphs since the + last time it was reset. High watermark can only be reset to zero.*/ + pub const CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum( + 1, + ); +} +impl CUgraphMem_attribute_enum { + /** (value type = cuuint64_t) + Amount of memory, in bytes, currently allocated for use by + the CUDA graphs asynchronous allocator.*/ + pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum( + 2, + ); +} +impl CUgraphMem_attribute_enum { + /** (value type = cuuint64_t) + High watermark of memory, in bytes, currently allocated for use by + the CUDA graphs asynchronous allocator.*/ + pub const CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: CUgraphMem_attribute_enum = CUgraphMem_attribute_enum( + 3, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphMem_attribute_enum(pub ::core::ffi::c_uint); +pub use self::CUgraphMem_attribute_enum as CUgraphMem_attribute; +/// Child graph node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_CHILD_GRAPH_NODE_PARAMS_st { + /**< The child graph to clone into the node for node creation, or +a handle to the graph owned by the node for node query*/ + pub graph: CUgraph, +} +/// Child graph node parameters +pub type CUDA_CHILD_GRAPH_NODE_PARAMS = CUDA_CHILD_GRAPH_NODE_PARAMS_st; +/// Event record node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EVENT_RECORD_NODE_PARAMS_st { + ///< The event to record when the node executes + pub event: CUevent, +} +/// Event record node parameters +pub type CUDA_EVENT_RECORD_NODE_PARAMS = CUDA_EVENT_RECORD_NODE_PARAMS_st; +/// Event wait node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_EVENT_WAIT_NODE_PARAMS_st { + ///< The event to wait on from the node + pub event: CUevent, +} +/// Event wait node parameters +pub type CUDA_EVENT_WAIT_NODE_PARAMS = CUDA_EVENT_WAIT_NODE_PARAMS_st; +/// Graph node parameters. See ::cuGraphAddNode. +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUgraphNodeParams_st { + ///< Type of the node + pub type_: CUgraphNodeType, + ///< Reserved. Must be zero. + pub reserved0: [::core::ffi::c_int; 3usize], + pub __bindgen_anon_1: CUgraphNodeParams_st__bindgen_ty_1, + ///< Reserved bytes. Must be zero. + pub reserved2: ::core::ffi::c_longlong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUgraphNodeParams_st__bindgen_ty_1 { + ///< Padding. Unused bytes must be zero. 
+ pub reserved1: [::core::ffi::c_longlong; 29usize], + ///< Kernel node parameters. + pub kernel: CUDA_KERNEL_NODE_PARAMS_v3, + ///< Memcpy node parameters. + pub memcpy: CUDA_MEMCPY_NODE_PARAMS, + ///< Memset node parameters. + pub memset: CUDA_MEMSET_NODE_PARAMS_v2, + ///< Host node parameters. + pub host: CUDA_HOST_NODE_PARAMS_v2, + ///< Child graph node parameters. + pub graph: CUDA_CHILD_GRAPH_NODE_PARAMS, + ///< Event wait node parameters. + pub eventWait: CUDA_EVENT_WAIT_NODE_PARAMS, + ///< Event record node parameters. + pub eventRecord: CUDA_EVENT_RECORD_NODE_PARAMS, + ///< External semaphore signal node parameters. + pub extSemSignal: CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2, + ///< External semaphore wait node parameters. + pub extSemWait: CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2, + ///< Memory allocation node parameters. + pub alloc: CUDA_MEM_ALLOC_NODE_PARAMS_v2, + ///< Memory free node parameters. + pub free: CUDA_MEM_FREE_NODE_PARAMS, + ///< MemOp node parameters. + pub memOp: CUDA_BATCH_MEM_OP_NODE_PARAMS_v2, + ///< Conditional node parameters. + pub conditional: CUDA_CONDITIONAL_NODE_PARAMS, +} +/// Graph node parameters. See ::cuGraphAddNode. +pub type CUgraphNodeParams = CUgraphNodeParams_st; +impl CUflushGPUDirectRDMAWritesOptions_enum { + ///< ::cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device. + pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST: CUflushGPUDirectRDMAWritesOptions_enum = CUflushGPUDirectRDMAWritesOptions_enum( + 1, + ); +} +impl CUflushGPUDirectRDMAWritesOptions_enum { + ///< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. + pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS: CUflushGPUDirectRDMAWritesOptions_enum = CUflushGPUDirectRDMAWritesOptions_enum( + 2, + ); +} +#[repr(transparent)] +/// Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUflushGPUDirectRDMAWritesOptions_enum(pub ::core::ffi::c_uint); +/// Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS +pub use self::CUflushGPUDirectRDMAWritesOptions_enum as CUflushGPUDirectRDMAWritesOptions; +impl CUGPUDirectRDMAWritesOrdering_enum { + ///< The device does not natively support ordering of remote writes. ::cuFlushGPUDirectRDMAWrites() can be leveraged if supported. + pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum( + 0, + ); +} +impl CUGPUDirectRDMAWritesOrdering_enum { + ///< Natively, the device can consistently consume remote writes, although other CUDA devices may not. + pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum( + 100, + ); +} +impl CUGPUDirectRDMAWritesOrdering_enum { + ///< Any CUDA device in the system can consistently consume remote writes to this device. 
+ pub const CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES: CUGPUDirectRDMAWritesOrdering_enum = CUGPUDirectRDMAWritesOrdering_enum( + 200, + ); +} +#[repr(transparent)] +/// Platform native ordering for GPUDirect RDMA writes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUGPUDirectRDMAWritesOrdering_enum(pub ::core::ffi::c_uint); +/// Platform native ordering for GPUDirect RDMA writes +pub use self::CUGPUDirectRDMAWritesOrdering_enum as CUGPUDirectRDMAWritesOrdering; +impl CUflushGPUDirectRDMAWritesScope_enum { + ///< Blocks until remote writes are visible to the CUDA device context owning the data. + pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER: CUflushGPUDirectRDMAWritesScope_enum = CUflushGPUDirectRDMAWritesScope_enum( + 100, + ); +} +impl CUflushGPUDirectRDMAWritesScope_enum { + ///< Blocks until remote writes are visible to all CUDA device contexts. + pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES: CUflushGPUDirectRDMAWritesScope_enum = CUflushGPUDirectRDMAWritesScope_enum( + 200, + ); +} +#[repr(transparent)] +/// The scopes for ::cuFlushGPUDirectRDMAWrites +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUflushGPUDirectRDMAWritesScope_enum(pub ::core::ffi::c_uint); +/// The scopes for ::cuFlushGPUDirectRDMAWrites +pub use self::CUflushGPUDirectRDMAWritesScope_enum as CUflushGPUDirectRDMAWritesScope; +impl CUflushGPUDirectRDMAWritesTarget_enum { + ///< Sets the target for ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context. + pub const CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX: CUflushGPUDirectRDMAWritesTarget_enum = CUflushGPUDirectRDMAWritesTarget_enum( + 0, + ); +} +#[repr(transparent)] +/// The targets for ::cuFlushGPUDirectRDMAWrites +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUflushGPUDirectRDMAWritesTarget_enum(pub ::core::ffi::c_uint); +/// The targets for ::cuFlushGPUDirectRDMAWrites +pub use self::CUflushGPUDirectRDMAWritesTarget_enum as CUflushGPUDirectRDMAWritesTarget; +impl CUgraphDebugDot_flags_enum { + ///< Output all debug data as if every debug flag is enabled + pub const CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 1, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Use CUDA Runtime structures for output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 2, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUDA_KERNEL_NODE_PARAMS values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 4, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUDA_MEMCPY3D values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 8, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUDA_MEMSET_NODE_PARAMS values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 16, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUDA_HOST_NODE_PARAMS values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 32, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUevent handle from record and wait nodes to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 64, + ); +} +impl CUgraphDebugDot_flags_enum { + 
///< Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 128, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 256, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds CUkernelNodeAttrValue values to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 512, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds node handles and every kernel function handle to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 1024, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds memory alloc node parameters to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 2048, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds memory free node parameters to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 4096, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds batch mem op node parameters to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 8192, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds edge numbering information + pub const CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 16384, + ); +} +impl CUgraphDebugDot_flags_enum { + ///< Adds conditional node parameters to output + pub const CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS: CUgraphDebugDot_flags_enum = CUgraphDebugDot_flags_enum( + 32768, + ); +} +#[repr(transparent)] +/// The additional write options for ::cuGraphDebugDotPrint +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphDebugDot_flags_enum(pub ::core::ffi::c_uint); +/// The additional write options for ::cuGraphDebugDotPrint +pub use self::CUgraphDebugDot_flags_enum as CUgraphDebugDot_flags; +impl CUuserObject_flags_enum { + ///< Indicates the destructor execution is not synchronized by any CUDA handle. + pub const CU_USER_OBJECT_NO_DESTRUCTOR_SYNC: CUuserObject_flags_enum = CUuserObject_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flags for user objects for graphs +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUuserObject_flags_enum(pub ::core::ffi::c_uint); +/// Flags for user objects for graphs +pub use self::CUuserObject_flags_enum as CUuserObject_flags; +impl CUuserObjectRetain_flags_enum { + ///< Transfer references from the caller rather than creating new references. + pub const CU_GRAPH_USER_OBJECT_MOVE: CUuserObjectRetain_flags_enum = CUuserObjectRetain_flags_enum( + 1, + ); +} +#[repr(transparent)] +/// Flags for retaining user object references for graphs +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUuserObjectRetain_flags_enum(pub ::core::ffi::c_uint); +/// Flags for retaining user object references for graphs +pub use self::CUuserObjectRetain_flags_enum as CUuserObjectRetain_flags; +impl CUgraphInstantiate_flags_enum { + ///< Automatically free memory allocated in a graph before relaunching. 
+ pub const CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum( + 1, + ); +} +impl CUgraphInstantiate_flags_enum { + /**< Automatically upload the graph after instantiation. Only supported by +::cuGraphInstantiateWithParams. The upload will be performed using the +stream provided in \p instantiateParams.*/ + pub const CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum( + 2, + ); +} +impl CUgraphInstantiate_flags_enum { + /**< Instantiate the graph to be launchable from the device. This flag can only +be used on platforms which support unified addressing. This flag cannot be +used in conjunction with CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH.*/ + pub const CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum( + 4, + ); +} +impl CUgraphInstantiate_flags_enum { + /**< Run the graph using the per-node priority attributes rather than the +priority of the stream it is launched into.*/ + pub const CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY: CUgraphInstantiate_flags_enum = CUgraphInstantiate_flags_enum( + 8, + ); +} +#[repr(transparent)] +/// Flags for instantiating a graph +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgraphInstantiate_flags_enum(pub ::core::ffi::c_uint); +/// Flags for instantiating a graph +pub use self::CUgraphInstantiate_flags_enum as CUgraphInstantiate_flags; +impl CUdeviceNumaConfig_enum { + ///< The GPU is not a NUMA node + pub const CU_DEVICE_NUMA_CONFIG_NONE: CUdeviceNumaConfig_enum = CUdeviceNumaConfig_enum( + 0, + ); +} +impl CUdeviceNumaConfig_enum { + ///< The GPU is a NUMA node, CU_DEVICE_ATTRIBUTE_NUMA_ID contains its NUMA ID + pub const CU_DEVICE_NUMA_CONFIG_NUMA_NODE: CUdeviceNumaConfig_enum = CUdeviceNumaConfig_enum( + 1, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdeviceNumaConfig_enum(pub ::core::ffi::c_uint); +pub use self::CUdeviceNumaConfig_enum as CUdeviceNumaConfig; +impl CUmoduleLoadingMode_enum { + ///< Lazy Kernel Loading is not enabled + pub const CU_MODULE_EAGER_LOADING: CUmoduleLoadingMode_enum = CUmoduleLoadingMode_enum( + 1, + ); +} +impl CUmoduleLoadingMode_enum { + ///< Lazy Kernel Loading is enabled + pub const CU_MODULE_LAZY_LOADING: CUmoduleLoadingMode_enum = CUmoduleLoadingMode_enum( + 2, + ); +} +#[repr(transparent)] +/// CUDA Lazy Loading status +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUmoduleLoadingMode_enum(pub ::core::ffi::c_uint); +/// CUDA Lazy Loading status +pub use self::CUmoduleLoadingMode_enum as CUmoduleLoadingMode; +impl CUfunctionLoadingState_enum { + pub const CU_FUNCTION_LOADING_STATE_UNLOADED: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum( + 0, + ); +} +impl CUfunctionLoadingState_enum { + pub const CU_FUNCTION_LOADING_STATE_LOADED: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum( + 1, + ); +} +impl CUfunctionLoadingState_enum { + pub const CU_FUNCTION_LOADING_STATE_MAX: CUfunctionLoadingState_enum = CUfunctionLoadingState_enum( + 2, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUfunctionLoadingState_enum(pub ::core::ffi::c_uint); +pub use self::CUfunctionLoadingState_enum as CUfunctionLoadingState; +impl CUcoredumpSettings_enum { + pub const CU_COREDUMP_ENABLE_ON_EXCEPTION: CUcoredumpSettings_enum = CUcoredumpSettings_enum( + 1, + ); +} +impl CUcoredumpSettings_enum { + pub const 
CU_COREDUMP_TRIGGER_HOST: CUcoredumpSettings_enum = CUcoredumpSettings_enum( + 2, + ); +} +impl CUcoredumpSettings_enum { + pub const CU_COREDUMP_LIGHTWEIGHT: CUcoredumpSettings_enum = CUcoredumpSettings_enum( + 3, + ); +} +impl CUcoredumpSettings_enum { + pub const CU_COREDUMP_ENABLE_USER_TRIGGER: CUcoredumpSettings_enum = CUcoredumpSettings_enum( + 4, + ); +} +impl CUcoredumpSettings_enum { + pub const CU_COREDUMP_FILE: CUcoredumpSettings_enum = CUcoredumpSettings_enum(5); +} +impl CUcoredumpSettings_enum { + pub const CU_COREDUMP_PIPE: CUcoredumpSettings_enum = CUcoredumpSettings_enum(6); +} +impl CUcoredumpSettings_enum { + pub const CU_COREDUMP_MAX: CUcoredumpSettings_enum = CUcoredumpSettings_enum(7); +} +#[repr(transparent)] +/// Flags for choosing a coredump attribute to get/set +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUcoredumpSettings_enum(pub ::core::ffi::c_uint); +/// Flags for choosing a coredump attribute to get/set +pub use self::CUcoredumpSettings_enum as CUcoredumpSettings; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUgreenCtx_st { + _unused: [u8; 0], +} +/** \typedef typedef struct CUgreenCtx_st* CUgreenCtx + A green context handle. This handle can be used safely from only one CPU thread at a time. + Created via ::cuGreenCtxCreate*/ +pub type CUgreenCtx = *mut CUgreenCtx_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUdevResourceDesc_st { + _unused: [u8; 0], +} +/** \typedef struct CUdevResourceDesc_st* CUdevResourceDesc; + An opaque descriptor handle. The descriptor encapsulates multiple created and configured resources. + Created via ::cuDevResourceGenerateDesc*/ +pub type CUdevResourceDesc = *mut CUdevResourceDesc_st; +impl CUgreenCtxCreate_flags { + ///< Required. Creates a default stream to use inside the green context + pub const CU_GREEN_CTX_DEFAULT_STREAM: CUgreenCtxCreate_flags = CUgreenCtxCreate_flags( + 1, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUgreenCtxCreate_flags(pub ::core::ffi::c_uint); +impl CUdevResourceType { + pub const CU_DEV_RESOURCE_TYPE_INVALID: CUdevResourceType = CUdevResourceType(0); +} +impl CUdevResourceType { + ///< Streaming multiprocessors related information + pub const CU_DEV_RESOURCE_TYPE_SM: CUdevResourceType = CUdevResourceType(1); +} +impl CUdevResourceType { + pub const CU_DEV_RESOURCE_TYPE_MAX: CUdevResourceType = CUdevResourceType(2); +} +#[repr(transparent)] +/** \typedef enum CUdevResourceType + Type of resource*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdevResourceType(pub ::core::ffi::c_uint); +/** \struct CUdevSmResource + Data for SM-related resources*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdevSmResource_st { + ///< The amount of streaming multiprocessors available in this resource. This is an output parameter only, do not write to this field. + pub smCount: ::core::ffi::c_uint, +} +/** \struct CUdevSmResource + Data for SM-related resources*/ +pub type CUdevSmResource = CUdevSmResource_st; +/** \struct CUdevResource + A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it. + \code + struct { + CUdevResourceType type; + union { + CUdevSmResource sm; + }; + }; + \endcode + - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resoure is not valid and cannot be further accessed. 
+ - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example, + \p sm.smCount will reflect the amount of streaming multiprocessors available in this resource.*/ +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUdevResource_st { + ///< Type of resource, dictates which union field was last set + pub type_: CUdevResourceType, + pub _internal_padding: [::core::ffi::c_uchar; 92usize], + pub __bindgen_anon_1: CUdevResource_st__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUdevResource_st__bindgen_ty_1 { + ///< Resource corresponding to CU_DEV_RESOURCE_TYPE_SM \p. type. + pub sm: CUdevSmResource, + pub _oversize: [::core::ffi::c_uchar; 48usize], +} +/** \struct CUdevResource + A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it. + \code + struct { + CUdevResourceType type; + union { + CUdevSmResource sm; + }; + }; + \endcode + - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resoure is not valid and cannot be further accessed. + - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. For example, + \p sm.smCount will reflect the amount of streaming multiprocessors available in this resource.*/ +pub type CUdevResource_v1 = CUdevResource_st; +/** \struct CUdevResource + A tagged union describing different resources identified by the type field. This structure should not be directly modified outside of the API that created it. + \code + struct { + CUdevResourceType type; + union { + CUdevSmResource sm; + }; + }; + \endcode + - If \p type is \p CU_DEV_RESOURCE_TYPE_INVALID, this resoure is not valid and cannot be further accessed. + - If \p type is \p CU_DEV_RESOURCE_TYPE_SM, the ::CUdevSmResource structure \p sm is filled in. 
For example, + \p sm.smCount will reflect the amount of streaming multiprocessors available in this resource.*/ +pub type CUdevResource = CUdevResource_v1; +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUdeviceptr_v1(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMCPY2D_v1_st { + ///< Source X in bytes + pub srcXInBytes: ::core::ffi::c_uint, + ///< Source Y + pub srcY: ::core::ffi::c_uint, + ///< Source memory type (host, device, array) + pub srcMemoryType: CUmemorytype, + ///< Source host pointer + pub srcHost: *const ::core::ffi::c_void, + ///< Source device pointer + pub srcDevice: CUdeviceptr_v1, + ///< Source array reference + pub srcArray: CUarray, + ///< Source pitch (ignored when src is array) + pub srcPitch: ::core::ffi::c_uint, + ///< Destination X in bytes + pub dstXInBytes: ::core::ffi::c_uint, + ///< Destination Y + pub dstY: ::core::ffi::c_uint, + ///< Destination memory type (host, device, array) + pub dstMemoryType: CUmemorytype, + ///< Destination host pointer + pub dstHost: *mut ::core::ffi::c_void, + ///< Destination device pointer + pub dstDevice: CUdeviceptr_v1, + ///< Destination array reference + pub dstArray: CUarray, + ///< Destination pitch (ignored when dst is array) + pub dstPitch: ::core::ffi::c_uint, + ///< Width of 2D memory copy in bytes + pub WidthInBytes: ::core::ffi::c_uint, + ///< Height of 2D memory copy + pub Height: ::core::ffi::c_uint, +} +pub type CUDA_MEMCPY2D_v1 = CUDA_MEMCPY2D_v1_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_MEMCPY3D_v1_st { + ///< Source X in bytes + pub srcXInBytes: ::core::ffi::c_uint, + ///< Source Y + pub srcY: ::core::ffi::c_uint, + ///< Source Z + pub srcZ: ::core::ffi::c_uint, + ///< Source LOD + pub srcLOD: ::core::ffi::c_uint, + ///< Source memory type (host, device, array) + pub srcMemoryType: CUmemorytype, + ///< Source host pointer + pub srcHost: *const ::core::ffi::c_void, + ///< Source device pointer + pub srcDevice: CUdeviceptr_v1, + ///< Source array reference + pub srcArray: CUarray, + ///< Must be NULL + pub reserved0: *mut ::core::ffi::c_void, + ///< Source pitch (ignored when src is array) + pub srcPitch: ::core::ffi::c_uint, + ///< Source height (ignored when src is array; may be 0 if Depth==1) + pub srcHeight: ::core::ffi::c_uint, + ///< Destination X in bytes + pub dstXInBytes: ::core::ffi::c_uint, + ///< Destination Y + pub dstY: ::core::ffi::c_uint, + ///< Destination Z + pub dstZ: ::core::ffi::c_uint, + ///< Destination LOD + pub dstLOD: ::core::ffi::c_uint, + ///< Destination memory type (host, device, array) + pub dstMemoryType: CUmemorytype, + ///< Destination host pointer + pub dstHost: *mut ::core::ffi::c_void, + ///< Destination device pointer + pub dstDevice: CUdeviceptr_v1, + ///< Destination array reference + pub dstArray: CUarray, + ///< Must be NULL + pub reserved1: *mut ::core::ffi::c_void, + ///< Destination pitch (ignored when dst is array) + pub dstPitch: ::core::ffi::c_uint, + ///< Destination height (ignored when dst is array; may be 0 if Depth==1) + pub dstHeight: ::core::ffi::c_uint, + ///< Width of 3D memory copy in bytes + pub WidthInBytes: ::core::ffi::c_uint, + ///< Height of 3D memory copy + pub Height: ::core::ffi::c_uint, + ///< Depth of 3D memory copy + pub Depth: ::core::ffi::c_uint, +} +pub type CUDA_MEMCPY3D_v1 = CUDA_MEMCPY3D_v1_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY_DESCRIPTOR_v1_st 
{ + ///< Width of array + pub Width: ::core::ffi::c_uint, + ///< Height of array + pub Height: ::core::ffi::c_uint, + ///< Array format + pub Format: CUarray_format, + ///< Channels per array element + pub NumChannels: ::core::ffi::c_uint, +} +pub type CUDA_ARRAY_DESCRIPTOR_v1 = CUDA_ARRAY_DESCRIPTOR_v1_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUDA_ARRAY3D_DESCRIPTOR_v1_st { + ///< Width of 3D array + pub Width: ::core::ffi::c_uint, + ///< Height of 3D array + pub Height: ::core::ffi::c_uint, + ///< Depth of 3D array + pub Depth: ::core::ffi::c_uint, + ///< Array format + pub Format: CUarray_format, + ///< Channels per array element + pub NumChannels: ::core::ffi::c_uint, + ///< Flags + pub Flags: ::core::ffi::c_uint, +} +pub type CUDA_ARRAY3D_DESCRIPTOR_v1 = CUDA_ARRAY3D_DESCRIPTOR_v1_st; +impl CUoutput_mode_enum { + ///< Output mode Key-Value pair format. + pub const CU_OUT_KEY_VALUE_PAIR: CUoutput_mode_enum = CUoutput_mode_enum(0); +} +impl CUoutput_mode_enum { + ///< Output mode Comma separated values format. + pub const CU_OUT_CSV: CUoutput_mode_enum = CUoutput_mode_enum(1); +} +#[repr(transparent)] +/// Profiler Output Modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUoutput_mode_enum(pub ::core::ffi::c_uint); +/// Profiler Output Modes +pub use self::CUoutput_mode_enum as CUoutput_mode; +pub type GLenum = ::core::ffi::c_uint; +pub type GLuint = ::core::ffi::c_uint; +pub type khronos_int32_t = i32; +impl CUGLDeviceList_enum { + ///< The CUDA devices for all GPUs used by the current OpenGL context + pub const CU_GL_DEVICE_LIST_ALL: CUGLDeviceList_enum = CUGLDeviceList_enum(1); +} +impl CUGLDeviceList_enum { + ///< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame + pub const CU_GL_DEVICE_LIST_CURRENT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum( + 2, + ); +} +impl CUGLDeviceList_enum { + ///< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame + pub const CU_GL_DEVICE_LIST_NEXT_FRAME: CUGLDeviceList_enum = CUGLDeviceList_enum(3); +} +#[repr(transparent)] +/// CUDA devices corresponding to an OpenGL device +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUGLDeviceList_enum(pub ::core::ffi::c_uint); +/// CUDA devices corresponding to an OpenGL device +pub use self::CUGLDeviceList_enum as CUGLDeviceList; +impl CUGLmap_flags_enum { + pub const CU_GL_MAP_RESOURCE_FLAGS_NONE: CUGLmap_flags_enum = CUGLmap_flags_enum(0); +} +impl CUGLmap_flags_enum { + pub const CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY: CUGLmap_flags_enum = CUGLmap_flags_enum( + 1, + ); +} +impl CUGLmap_flags_enum { + pub const CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD: CUGLmap_flags_enum = CUGLmap_flags_enum( + 2, + ); +} +#[repr(transparent)] +/// Flags to map or unmap a resource +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUGLmap_flags_enum(pub ::core::ffi::c_uint); +/// Flags to map or unmap a resource +pub use self::CUGLmap_flags_enum as CUGLmap_flags; +pub type EGLint = khronos_int32_t; +pub type EGLSyncKHR = *mut ::core::ffi::c_void; +pub type EGLImageKHR = *mut ::core::ffi::c_void; +pub type EGLStreamKHR = *mut ::core::ffi::c_void; +impl CUeglFrameType_enum { + ///< Frame type CUDA array + pub const CU_EGL_FRAME_TYPE_ARRAY: CUeglFrameType_enum = CUeglFrameType_enum(0); +} +impl CUeglFrameType_enum { + ///< Frame type pointer + pub const CU_EGL_FRAME_TYPE_PITCH: CUeglFrameType_enum = CUeglFrameType_enum(1); +} +#[repr(transparent)] +/// CUDA 
EglFrame type - array or pointer +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUeglFrameType_enum(pub ::core::ffi::c_uint); +/// CUDA EglFrame type - array or pointer +pub use self::CUeglFrameType_enum as CUeglFrameType; +impl CUeglResourceLocationFlags_enum { + ///< Resource location sysmem + pub const CU_EGL_RESOURCE_LOCATION_SYSMEM: CUeglResourceLocationFlags_enum = CUeglResourceLocationFlags_enum( + 0, + ); +} +impl CUeglResourceLocationFlags_enum { + ///< Resource location vidmem + pub const CU_EGL_RESOURCE_LOCATION_VIDMEM: CUeglResourceLocationFlags_enum = CUeglResourceLocationFlags_enum( + 1, + ); +} +#[repr(transparent)] +/** Resource location flags- sysmem or vidmem + + For CUDA context on iGPU, since video and system memory are equivalent - + these flags will not have an effect on the execution. + + For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags + to give a hint about the desired location. + + ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory + to be accessed by CUDA. + + ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated + video memory to be accessed by CUDA. + + There may be an additional latency due to new allocation and data migration, + if the frame is produced on a different memory. +*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUeglResourceLocationFlags_enum(pub ::core::ffi::c_uint); +/** Resource location flags- sysmem or vidmem + + For CUDA context on iGPU, since video and system memory are equivalent - + these flags will not have an effect on the execution. + + For CUDA context on dGPU, applications can use the flag ::CUeglResourceLocationFlags + to give a hint about the desired location. + + ::CU_EGL_RESOURCE_LOCATION_SYSMEM - the frame data is made resident on the system memory + to be accessed by CUDA. + + ::CU_EGL_RESOURCE_LOCATION_VIDMEM - the frame data is made resident on the dedicated + video memory to be accessed by CUDA. + + There may be an additional latency due to new allocation and data migration, + if the frame is produced on a different memory. +*/ +pub use self::CUeglResourceLocationFlags_enum as CUeglResourceLocationFlags; +impl CUeglColorFormat_enum { + ///< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 0, + ); +} +impl CUeglColorFormat_enum { + ///< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. + pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 1, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YUV422_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 2, + ); +} +impl CUeglColorFormat_enum { + ///< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. + pub const CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 3, + ); +} +impl CUeglColorFormat_enum { + ///< R/G/B three channels in one surface with BGR byte ordering. Only pitch linear format supported. + pub const CU_EGL_COLOR_FORMAT_RGB: CUeglColorFormat_enum = CUeglColorFormat_enum(4); +} +impl CUeglColorFormat_enum { + ///< R/G/B three channels in one surface with RGB byte ordering. 
Only pitch linear format supported. + pub const CU_EGL_COLOR_FORMAT_BGR: CUeglColorFormat_enum = CUeglColorFormat_enum(5); +} +impl CUeglColorFormat_enum { + ///< R/G/B/A four channels in one surface with BGRA byte ordering. + pub const CU_EGL_COLOR_FORMAT_ARGB: CUeglColorFormat_enum = CUeglColorFormat_enum(6); +} +impl CUeglColorFormat_enum { + ///< R/G/B/A four channels in one surface with ABGR byte ordering. + pub const CU_EGL_COLOR_FORMAT_RGBA: CUeglColorFormat_enum = CUeglColorFormat_enum(7); +} +impl CUeglColorFormat_enum { + ///< single luminance channel in one surface. + pub const CU_EGL_COLOR_FORMAT_L: CUeglColorFormat_enum = CUeglColorFormat_enum(8); +} +impl CUeglColorFormat_enum { + ///< single color channel in one surface. + pub const CU_EGL_COLOR_FORMAT_R: CUeglColorFormat_enum = CUeglColorFormat_enum(9); +} +impl CUeglColorFormat_enum { + ///< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YUV444_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 10, + ); +} +impl CUeglColorFormat_enum { + ///< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. + pub const CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 11, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V in one surface, interleaved as UYVY in one channel. + pub const CU_EGL_COLOR_FORMAT_YUYV_422: CUeglColorFormat_enum = CUeglColorFormat_enum( + 12, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V in one surface, interleaved as YUYV in one channel. + pub const CU_EGL_COLOR_FORMAT_UYVY_422: CUeglColorFormat_enum = CUeglColorFormat_enum( + 13, + ); +} +impl CUeglColorFormat_enum { + ///< R/G/B/A four channels in one surface with RGBA byte ordering. + pub const CU_EGL_COLOR_FORMAT_ABGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 14, + ); +} +impl CUeglColorFormat_enum { + ///< R/G/B/A four channels in one surface with ARGB byte ordering. + pub const CU_EGL_COLOR_FORMAT_BGRA: CUeglColorFormat_enum = CUeglColorFormat_enum( + 15, + ); +} +impl CUeglColorFormat_enum { + ///< Alpha color format - one channel in one surface. + pub const CU_EGL_COLOR_FORMAT_A: CUeglColorFormat_enum = CUeglColorFormat_enum(16); +} +impl CUeglColorFormat_enum { + ///< R/G color format - two channels in one surface with GR byte ordering + pub const CU_EGL_COLOR_FORMAT_RG: CUeglColorFormat_enum = CUeglColorFormat_enum(17); +} +impl CUeglColorFormat_enum { + ///< Y, U, V, A four channels in one surface, interleaved as VUYA. + pub const CU_EGL_COLOR_FORMAT_AYUV: CUeglColorFormat_enum = CUeglColorFormat_enum( + 18, + ); +} +impl CUeglColorFormat_enum { + ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 19, + ); +} +impl CUeglColorFormat_enum { + ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 20, + ); +} +impl CUeglColorFormat_enum { + ///< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
+ pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 21, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 22, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 23, + ); +} +impl CUeglColorFormat_enum { + ///< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 24, + ); +} +impl CUeglColorFormat_enum { + ///< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 25, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. + pub const CU_EGL_COLOR_FORMAT_VYUY_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 26, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. + pub const CU_EGL_COLOR_FORMAT_UYVY_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 27, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. + pub const CU_EGL_COLOR_FORMAT_YUYV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 28, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. + pub const CU_EGL_COLOR_FORMAT_YVYU_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 29, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. + pub const CU_EGL_COLOR_FORMAT_YUV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 30, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. + pub const CU_EGL_COLOR_FORMAT_YUVA_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 31, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. + pub const CU_EGL_COLOR_FORMAT_AYUV_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 32, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 33, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 34, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
+ pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 35, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 36, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 37, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 38, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 39, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 40, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 41, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 42, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 43, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 44, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved RGGB ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 45, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved BGGR ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 46, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved GRBG ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 47, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved GBRG ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 48, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. 
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 49, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER10_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 50, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER10_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 51, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER10_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 52, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 53, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 54, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 55, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 56, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER14_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 57, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER14_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 58, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER14_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 59, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER14_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 60, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER20_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 61, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. 
+ pub const CU_EGL_COLOR_FORMAT_BAYER20_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 62, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER20_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 63, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER20_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 64, + ); +} +impl CUeglColorFormat_enum { + ///< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU444_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 65, + ); +} +impl CUeglColorFormat_enum { + ///< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_YVU422_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 66, + ); +} +impl CUeglColorFormat_enum { + ///< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 67, + ); +} +impl CUeglColorFormat_enum { + ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. + pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 68, + ); +} +impl CUeglColorFormat_enum { + ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. + pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 69, + ); +} +impl CUeglColorFormat_enum { + ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. + pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 70, + ); +} +impl CUeglColorFormat_enum { + ///< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. + pub const CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG: CUeglColorFormat_enum = CUeglColorFormat_enum( + 71, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved BCCR ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_BCCR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 72, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved RCCB ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_RCCB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 73, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved CRBC ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_CRBC: CUeglColorFormat_enum = CUeglColorFormat_enum( + 74, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer format - one channel in one surface with interleaved CBRC ordering. + pub const CU_EGL_COLOR_FORMAT_BAYER_CBRC: CUeglColorFormat_enum = CUeglColorFormat_enum( + 75, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. 
+ pub const CU_EGL_COLOR_FORMAT_BAYER10_CCCC: CUeglColorFormat_enum = CUeglColorFormat_enum( + 76, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_BCCR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 77, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_RCCB: CUeglColorFormat_enum = CUeglColorFormat_enum( + 78, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_CRBC: CUeglColorFormat_enum = CUeglColorFormat_enum( + 79, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_CBRC: CUeglColorFormat_enum = CUeglColorFormat_enum( + 80, + ); +} +impl CUeglColorFormat_enum { + ///< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. + pub const CU_EGL_COLOR_FORMAT_BAYER12_CCCC: CUeglColorFormat_enum = CUeglColorFormat_enum( + 81, + ); +} +impl CUeglColorFormat_enum { + ///< Color format for single Y plane. + pub const CU_EGL_COLOR_FORMAT_Y: CUeglColorFormat_enum = CUeglColorFormat_enum(82); +} +impl CUeglColorFormat_enum { + ///< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum( + 83, + ); +} +impl CUeglColorFormat_enum { + ///< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum( + 84, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height= 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum( + 85, + ); +} +impl CUeglColorFormat_enum { + /**< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height += 1/2 Y height.*/ + pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum( + 86, + ); +} +impl CUeglColorFormat_enum { + ///< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum( + 87, + ); +} +impl CUeglColorFormat_enum { + ///< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum( + 88, + ); +} +impl CUeglColorFormat_enum { + /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height += 1/2 Y height.*/ + pub const CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum( + 89, + ); +} +impl CUeglColorFormat_enum { + ///< Y, V, U each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
+ pub const CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum( + 90, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum( + 91, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces (VU as one surface), U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum( + 92, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020: CUeglColorFormat_enum = CUeglColorFormat_enum( + 93, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR: CUeglColorFormat_enum = CUeglColorFormat_enum( + 94, + ); +} +impl CUeglColorFormat_enum { + ///< Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709: CUeglColorFormat_enum = CUeglColorFormat_enum( + 95, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Color format for single Y plane. + pub const CU_EGL_COLOR_FORMAT_Y_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 96, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Color format for single Y plane. + pub const CU_EGL_COLOR_FORMAT_Y_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 97, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Color format for single Y10 plane. + pub const CU_EGL_COLOR_FORMAT_Y10_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 98, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Color format for single Y10 plane. + pub const CU_EGL_COLOR_FORMAT_Y10_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 99, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Color format for single Y12 plane. + pub const CU_EGL_COLOR_FORMAT_Y12_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 100, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Color format for single Y12 plane. + pub const CU_EGL_COLOR_FORMAT_Y12_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 101, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V, A four channels in one surface, interleaved as AVUY. + pub const CU_EGL_COLOR_FORMAT_YUVA: CUeglColorFormat_enum = CUeglColorFormat_enum( + 102, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V three channels in one surface, interleaved as VUY. Only pitch linear format supported. + pub const CU_EGL_COLOR_FORMAT_YUV: CUeglColorFormat_enum = CUeglColorFormat_enum( + 103, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V in one surface, interleaved as YVYU in one channel. + pub const CU_EGL_COLOR_FORMAT_YVYU: CUeglColorFormat_enum = CUeglColorFormat_enum( + 104, + ); +} +impl CUeglColorFormat_enum { + ///< Y, U, V in one surface, interleaved as VYUY in one channel. + pub const CU_EGL_COLOR_FORMAT_VYUY: CUeglColorFormat_enum = CUeglColorFormat_enum( + 105, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. 
+ pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 106, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y10, V10U10 in two surfaces(VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 107, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 108, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 109, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 110, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. + pub const CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 111, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 112, + ); +} +impl CUeglColorFormat_enum { + ///< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. + pub const CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER: CUeglColorFormat_enum = CUeglColorFormat_enum( + 113, + ); +} +impl CUeglColorFormat_enum { + pub const CU_EGL_COLOR_FORMAT_MAX: CUeglColorFormat_enum = CUeglColorFormat_enum( + 114, + ); +} +#[repr(transparent)] +/** CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. + Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CUeglColorFormat_enum(pub ::core::ffi::c_uint); +/** CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops. + Three channel formats are currently not supported for ::CU_EGL_FRAME_TYPE_ARRAY*/ +pub use self::CUeglColorFormat_enum as CUeglColorFormat; +/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL. 
+ + Each frame may contain one or more planes depending on whether the surface * is Multiplanar or not.*/ +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CUeglFrame_st { + pub frame: CUeglFrame_st__bindgen_ty_1, + ///< Width of first plane + pub width: ::core::ffi::c_uint, + ///< Height of first plane + pub height: ::core::ffi::c_uint, + ///< Depth of first plane + pub depth: ::core::ffi::c_uint, + ///< Pitch of first plane + pub pitch: ::core::ffi::c_uint, + ///< Number of planes + pub planeCount: ::core::ffi::c_uint, + ///< Number of channels for the plane + pub numChannels: ::core::ffi::c_uint, + ///< Array or Pitch + pub frameType: CUeglFrameType, + ///< CUDA EGL Color Format + pub eglColorFormat: CUeglColorFormat, + ///< CUDA Array Format + pub cuFormat: CUarray_format, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CUeglFrame_st__bindgen_ty_1 { + ///< Array of CUarray corresponding to each plane + pub pArray: [CUarray; 3usize], + ///< Array of Pointers corresponding to each plane + pub pPitch: [*mut ::core::ffi::c_void; 3usize], +} +/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL. + + Each frame may contain one or more planes depending on whether the surface * is Multiplanar or not.*/ +pub type CUeglFrame_v1 = CUeglFrame_st; +/** CUDA EGLFrame structure Descriptor - structure defining one frame of EGL. + + Each frame may contain one or more planes depending on whether the surface * is Multiplanar or not.*/ +pub type CUeglFrame = CUeglFrame_v1; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CUeglStreamConnection_st { + _unused: [u8; 0], +} +/// CUDA EGLSream Connection +pub type CUeglStreamConnection = *mut CUeglStreamConnection_st; +impl VdpStatus { + pub const VDP_STATUS_OK: VdpStatus = VdpStatus(0); +} +impl VdpStatus { + pub const VDP_STATUS_NO_IMPLEMENTATION: VdpStatus = VdpStatus(1); +} +impl VdpStatus { + pub const VDP_STATUS_DISPLAY_PREEMPTED: VdpStatus = VdpStatus(2); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_HANDLE: VdpStatus = VdpStatus(3); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_POINTER: VdpStatus = VdpStatus(4); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_CHROMA_TYPE: VdpStatus = VdpStatus(5); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_Y_CB_CR_FORMAT: VdpStatus = VdpStatus(6); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_RGBA_FORMAT: VdpStatus = VdpStatus(7); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_INDEXED_FORMAT: VdpStatus = VdpStatus(8); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_COLOR_STANDARD: VdpStatus = VdpStatus(9); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_COLOR_TABLE_FORMAT: VdpStatus = VdpStatus(10); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_BLEND_FACTOR: VdpStatus = VdpStatus(11); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_BLEND_EQUATION: VdpStatus = VdpStatus(12); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_FLAG: VdpStatus = VdpStatus(13); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_DECODER_PROFILE: VdpStatus = VdpStatus(14); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE: VdpStatus = VdpStatus(15); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER: VdpStatus = VdpStatus(16); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE: VdpStatus = VdpStatus(17); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE: VdpStatus = VdpStatus( + 18, + ); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_FUNC_ID: VdpStatus = 
VdpStatus(19); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_SIZE: VdpStatus = VdpStatus(20); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_VALUE: VdpStatus = VdpStatus(21); +} +impl VdpStatus { + pub const VDP_STATUS_INVALID_STRUCT_VERSION: VdpStatus = VdpStatus(22); +} +impl VdpStatus { + pub const VDP_STATUS_RESOURCES: VdpStatus = VdpStatus(23); +} +impl VdpStatus { + pub const VDP_STATUS_HANDLE_DEVICE_MISMATCH: VdpStatus = VdpStatus(24); +} +impl VdpStatus { + pub const VDP_STATUS_ERROR: VdpStatus = VdpStatus(25); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct VdpStatus(pub ::core::ffi::c_uint); +pub type VdpDevice = u32; +pub type VdpVideoSurface = u32; +pub type VdpOutputSurface = u32; +pub type VdpFuncId = u32; +pub type VdpGetProcAddress = ::core::option::Option< + unsafe extern "system" fn( + device: VdpDevice, + function_id: VdpFuncId, + function_pointer: *mut *mut ::core::ffi::c_void, + ) -> VdpStatus, +>; +impl CUerror { + pub const INVALID_VALUE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(1) + }); + pub const OUT_OF_MEMORY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(2) + }); + pub const NOT_INITIALIZED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(3) + }); + pub const DEINITIALIZED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(4) + }); + pub const PROFILER_DISABLED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(5) + }); + pub const PROFILER_NOT_INITIALIZED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(6) + }); + pub const PROFILER_ALREADY_STARTED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(7) + }); + pub const PROFILER_ALREADY_STOPPED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(8) + }); + pub const STUB_LIBRARY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(34) + }); + pub const DEVICE_UNAVAILABLE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(46) + }); + pub const NO_DEVICE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(100) + }); + pub const INVALID_DEVICE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(101) + }); + pub const DEVICE_NOT_LICENSED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(102) + }); + pub const INVALID_IMAGE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(200) + }); + pub const INVALID_CONTEXT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(201) + }); + pub const CONTEXT_ALREADY_CURRENT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(202) + }); + pub const MAP_FAILED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(205) + }); + pub const UNMAP_FAILED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(206) + }); + pub const ARRAY_IS_MAPPED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(207) + }); + pub const ALREADY_MAPPED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(208) + }); + pub const NO_BINARY_FOR_GPU: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(209) + }); + pub const ALREADY_ACQUIRED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(210) + }); + pub const NOT_MAPPED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(211) + }); + pub const NOT_MAPPED_AS_ARRAY: CUerror = CUerror(unsafe { + 
::core::num::NonZeroU32::new_unchecked(212) + }); + pub const NOT_MAPPED_AS_POINTER: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(213) + }); + pub const ECC_UNCORRECTABLE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(214) + }); + pub const UNSUPPORTED_LIMIT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(215) + }); + pub const CONTEXT_ALREADY_IN_USE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(216) + }); + pub const PEER_ACCESS_UNSUPPORTED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(217) + }); + pub const INVALID_PTX: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(218) + }); + pub const INVALID_GRAPHICS_CONTEXT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(219) + }); + pub const NVLINK_UNCORRECTABLE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(220) + }); + pub const JIT_COMPILER_NOT_FOUND: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(221) + }); + pub const UNSUPPORTED_PTX_VERSION: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(222) + }); + pub const JIT_COMPILATION_DISABLED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(223) + }); + pub const UNSUPPORTED_EXEC_AFFINITY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(224) + }); + pub const UNSUPPORTED_DEVSIDE_SYNC: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(225) + }); + pub const INVALID_SOURCE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(300) + }); + pub const FILE_NOT_FOUND: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(301) + }); + pub const SHARED_OBJECT_SYMBOL_NOT_FOUND: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(302) + }); + pub const SHARED_OBJECT_INIT_FAILED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(303) + }); + pub const OPERATING_SYSTEM: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(304) + }); + pub const INVALID_HANDLE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(400) + }); + pub const ILLEGAL_STATE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(401) + }); + pub const LOSSY_QUERY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(402) + }); + pub const NOT_FOUND: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(500) + }); + pub const NOT_READY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(600) + }); + pub const ILLEGAL_ADDRESS: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(700) + }); + pub const LAUNCH_OUT_OF_RESOURCES: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(701) + }); + pub const LAUNCH_TIMEOUT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(702) + }); + pub const LAUNCH_INCOMPATIBLE_TEXTURING: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(703) + }); + pub const PEER_ACCESS_ALREADY_ENABLED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(704) + }); + pub const PEER_ACCESS_NOT_ENABLED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(705) + }); + pub const PRIMARY_CONTEXT_ACTIVE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(708) + }); + pub const CONTEXT_IS_DESTROYED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(709) + }); + pub const ASSERT: 
CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(710) + }); + pub const TOO_MANY_PEERS: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(711) + }); + pub const HOST_MEMORY_ALREADY_REGISTERED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(712) + }); + pub const HOST_MEMORY_NOT_REGISTERED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(713) + }); + pub const HARDWARE_STACK_ERROR: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(714) + }); + pub const ILLEGAL_INSTRUCTION: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(715) + }); + pub const MISALIGNED_ADDRESS: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(716) + }); + pub const INVALID_ADDRESS_SPACE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(717) + }); + pub const INVALID_PC: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(718) + }); + pub const LAUNCH_FAILED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(719) + }); + pub const COOPERATIVE_LAUNCH_TOO_LARGE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(720) + }); + pub const NOT_PERMITTED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(800) + }); + pub const NOT_SUPPORTED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(801) + }); + pub const SYSTEM_NOT_READY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(802) + }); + pub const SYSTEM_DRIVER_MISMATCH: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(803) + }); + pub const COMPAT_NOT_SUPPORTED_ON_DEVICE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(804) + }); + pub const MPS_CONNECTION_FAILED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(805) + }); + pub const MPS_RPC_FAILURE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(806) + }); + pub const MPS_SERVER_NOT_READY: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(807) + }); + pub const MPS_MAX_CLIENTS_REACHED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(808) + }); + pub const MPS_MAX_CONNECTIONS_REACHED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(809) + }); + pub const MPS_CLIENT_TERMINATED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(810) + }); + pub const CDP_NOT_SUPPORTED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(811) + }); + pub const CDP_VERSION_MISMATCH: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(812) + }); + pub const STREAM_CAPTURE_UNSUPPORTED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(900) + }); + pub const STREAM_CAPTURE_INVALIDATED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(901) + }); + pub const STREAM_CAPTURE_MERGE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(902) + }); + pub const STREAM_CAPTURE_UNMATCHED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(903) + }); + pub const STREAM_CAPTURE_UNJOINED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(904) + }); + pub const STREAM_CAPTURE_ISOLATION: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(905) + }); + pub const STREAM_CAPTURE_IMPLICIT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(906) + }); + pub const CAPTURED_EVENT: CUerror = CUerror(unsafe { + 
::core::num::NonZeroU32::new_unchecked(907) + }); + pub const STREAM_CAPTURE_WRONG_THREAD: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(908) + }); + pub const TIMEOUT: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(909) + }); + pub const GRAPH_EXEC_UPDATE_FAILURE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(910) + }); + pub const EXTERNAL_DEVICE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(911) + }); + pub const INVALID_CLUSTER_SIZE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(912) + }); + pub const FUNCTION_NOT_LOADED: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(913) + }); + pub const INVALID_RESOURCE_TYPE: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(914) + }); + pub const INVALID_RESOURCE_CONFIGURATION: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(915) + }); + pub const UNKNOWN: CUerror = CUerror(unsafe { + ::core::num::NonZeroU32::new_unchecked(999) + }); +} +#[repr(transparent)] +#[derive(Debug, Hash, Copy, Clone, PartialEq, Eq)] +pub struct CUerror(pub ::core::num::NonZeroU32); +pub trait CUresultConsts { + const SUCCESS: CUresult = CUresult::Ok(()); + const ERROR_INVALID_VALUE: CUresult = CUresult::Err(CUerror::INVALID_VALUE); + const ERROR_OUT_OF_MEMORY: CUresult = CUresult::Err(CUerror::OUT_OF_MEMORY); + const ERROR_NOT_INITIALIZED: CUresult = CUresult::Err(CUerror::NOT_INITIALIZED); + const ERROR_DEINITIALIZED: CUresult = CUresult::Err(CUerror::DEINITIALIZED); + const ERROR_PROFILER_DISABLED: CUresult = CUresult::Err(CUerror::PROFILER_DISABLED); + const ERROR_PROFILER_NOT_INITIALIZED: CUresult = CUresult::Err( + CUerror::PROFILER_NOT_INITIALIZED, + ); + const ERROR_PROFILER_ALREADY_STARTED: CUresult = CUresult::Err( + CUerror::PROFILER_ALREADY_STARTED, + ); + const ERROR_PROFILER_ALREADY_STOPPED: CUresult = CUresult::Err( + CUerror::PROFILER_ALREADY_STOPPED, + ); + const ERROR_STUB_LIBRARY: CUresult = CUresult::Err(CUerror::STUB_LIBRARY); + const ERROR_DEVICE_UNAVAILABLE: CUresult = CUresult::Err( + CUerror::DEVICE_UNAVAILABLE, + ); + const ERROR_NO_DEVICE: CUresult = CUresult::Err(CUerror::NO_DEVICE); + const ERROR_INVALID_DEVICE: CUresult = CUresult::Err(CUerror::INVALID_DEVICE); + const ERROR_DEVICE_NOT_LICENSED: CUresult = CUresult::Err( + CUerror::DEVICE_NOT_LICENSED, + ); + const ERROR_INVALID_IMAGE: CUresult = CUresult::Err(CUerror::INVALID_IMAGE); + const ERROR_INVALID_CONTEXT: CUresult = CUresult::Err(CUerror::INVALID_CONTEXT); + const ERROR_CONTEXT_ALREADY_CURRENT: CUresult = CUresult::Err( + CUerror::CONTEXT_ALREADY_CURRENT, + ); + const ERROR_MAP_FAILED: CUresult = CUresult::Err(CUerror::MAP_FAILED); + const ERROR_UNMAP_FAILED: CUresult = CUresult::Err(CUerror::UNMAP_FAILED); + const ERROR_ARRAY_IS_MAPPED: CUresult = CUresult::Err(CUerror::ARRAY_IS_MAPPED); + const ERROR_ALREADY_MAPPED: CUresult = CUresult::Err(CUerror::ALREADY_MAPPED); + const ERROR_NO_BINARY_FOR_GPU: CUresult = CUresult::Err(CUerror::NO_BINARY_FOR_GPU); + const ERROR_ALREADY_ACQUIRED: CUresult = CUresult::Err(CUerror::ALREADY_ACQUIRED); + const ERROR_NOT_MAPPED: CUresult = CUresult::Err(CUerror::NOT_MAPPED); + const ERROR_NOT_MAPPED_AS_ARRAY: CUresult = CUresult::Err( + CUerror::NOT_MAPPED_AS_ARRAY, + ); + const ERROR_NOT_MAPPED_AS_POINTER: CUresult = CUresult::Err( + CUerror::NOT_MAPPED_AS_POINTER, + ); + const ERROR_ECC_UNCORRECTABLE: CUresult = CUresult::Err(CUerror::ECC_UNCORRECTABLE); + const 
ERROR_UNSUPPORTED_LIMIT: CUresult = CUresult::Err(CUerror::UNSUPPORTED_LIMIT); + const ERROR_CONTEXT_ALREADY_IN_USE: CUresult = CUresult::Err( + CUerror::CONTEXT_ALREADY_IN_USE, + ); + const ERROR_PEER_ACCESS_UNSUPPORTED: CUresult = CUresult::Err( + CUerror::PEER_ACCESS_UNSUPPORTED, + ); + const ERROR_INVALID_PTX: CUresult = CUresult::Err(CUerror::INVALID_PTX); + const ERROR_INVALID_GRAPHICS_CONTEXT: CUresult = CUresult::Err( + CUerror::INVALID_GRAPHICS_CONTEXT, + ); + const ERROR_NVLINK_UNCORRECTABLE: CUresult = CUresult::Err( + CUerror::NVLINK_UNCORRECTABLE, + ); + const ERROR_JIT_COMPILER_NOT_FOUND: CUresult = CUresult::Err( + CUerror::JIT_COMPILER_NOT_FOUND, + ); + const ERROR_UNSUPPORTED_PTX_VERSION: CUresult = CUresult::Err( + CUerror::UNSUPPORTED_PTX_VERSION, + ); + const ERROR_JIT_COMPILATION_DISABLED: CUresult = CUresult::Err( + CUerror::JIT_COMPILATION_DISABLED, + ); + const ERROR_UNSUPPORTED_EXEC_AFFINITY: CUresult = CUresult::Err( + CUerror::UNSUPPORTED_EXEC_AFFINITY, + ); + const ERROR_UNSUPPORTED_DEVSIDE_SYNC: CUresult = CUresult::Err( + CUerror::UNSUPPORTED_DEVSIDE_SYNC, + ); + const ERROR_INVALID_SOURCE: CUresult = CUresult::Err(CUerror::INVALID_SOURCE); + const ERROR_FILE_NOT_FOUND: CUresult = CUresult::Err(CUerror::FILE_NOT_FOUND); + const ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: CUresult = CUresult::Err( + CUerror::SHARED_OBJECT_SYMBOL_NOT_FOUND, + ); + const ERROR_SHARED_OBJECT_INIT_FAILED: CUresult = CUresult::Err( + CUerror::SHARED_OBJECT_INIT_FAILED, + ); + const ERROR_OPERATING_SYSTEM: CUresult = CUresult::Err(CUerror::OPERATING_SYSTEM); + const ERROR_INVALID_HANDLE: CUresult = CUresult::Err(CUerror::INVALID_HANDLE); + const ERROR_ILLEGAL_STATE: CUresult = CUresult::Err(CUerror::ILLEGAL_STATE); + const ERROR_LOSSY_QUERY: CUresult = CUresult::Err(CUerror::LOSSY_QUERY); + const ERROR_NOT_FOUND: CUresult = CUresult::Err(CUerror::NOT_FOUND); + const ERROR_NOT_READY: CUresult = CUresult::Err(CUerror::NOT_READY); + const ERROR_ILLEGAL_ADDRESS: CUresult = CUresult::Err(CUerror::ILLEGAL_ADDRESS); + const ERROR_LAUNCH_OUT_OF_RESOURCES: CUresult = CUresult::Err( + CUerror::LAUNCH_OUT_OF_RESOURCES, + ); + const ERROR_LAUNCH_TIMEOUT: CUresult = CUresult::Err(CUerror::LAUNCH_TIMEOUT); + const ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: CUresult = CUresult::Err( + CUerror::LAUNCH_INCOMPATIBLE_TEXTURING, + ); + const ERROR_PEER_ACCESS_ALREADY_ENABLED: CUresult = CUresult::Err( + CUerror::PEER_ACCESS_ALREADY_ENABLED, + ); + const ERROR_PEER_ACCESS_NOT_ENABLED: CUresult = CUresult::Err( + CUerror::PEER_ACCESS_NOT_ENABLED, + ); + const ERROR_PRIMARY_CONTEXT_ACTIVE: CUresult = CUresult::Err( + CUerror::PRIMARY_CONTEXT_ACTIVE, + ); + const ERROR_CONTEXT_IS_DESTROYED: CUresult = CUresult::Err( + CUerror::CONTEXT_IS_DESTROYED, + ); + const ERROR_ASSERT: CUresult = CUresult::Err(CUerror::ASSERT); + const ERROR_TOO_MANY_PEERS: CUresult = CUresult::Err(CUerror::TOO_MANY_PEERS); + const ERROR_HOST_MEMORY_ALREADY_REGISTERED: CUresult = CUresult::Err( + CUerror::HOST_MEMORY_ALREADY_REGISTERED, + ); + const ERROR_HOST_MEMORY_NOT_REGISTERED: CUresult = CUresult::Err( + CUerror::HOST_MEMORY_NOT_REGISTERED, + ); + const ERROR_HARDWARE_STACK_ERROR: CUresult = CUresult::Err( + CUerror::HARDWARE_STACK_ERROR, + ); + const ERROR_ILLEGAL_INSTRUCTION: CUresult = CUresult::Err( + CUerror::ILLEGAL_INSTRUCTION, + ); + const ERROR_MISALIGNED_ADDRESS: CUresult = CUresult::Err( + CUerror::MISALIGNED_ADDRESS, + ); + const ERROR_INVALID_ADDRESS_SPACE: CUresult = CUresult::Err( + CUerror::INVALID_ADDRESS_SPACE, + ); + 
const ERROR_INVALID_PC: CUresult = CUresult::Err(CUerror::INVALID_PC); + const ERROR_LAUNCH_FAILED: CUresult = CUresult::Err(CUerror::LAUNCH_FAILED); + const ERROR_COOPERATIVE_LAUNCH_TOO_LARGE: CUresult = CUresult::Err( + CUerror::COOPERATIVE_LAUNCH_TOO_LARGE, + ); + const ERROR_NOT_PERMITTED: CUresult = CUresult::Err(CUerror::NOT_PERMITTED); + const ERROR_NOT_SUPPORTED: CUresult = CUresult::Err(CUerror::NOT_SUPPORTED); + const ERROR_SYSTEM_NOT_READY: CUresult = CUresult::Err(CUerror::SYSTEM_NOT_READY); + const ERROR_SYSTEM_DRIVER_MISMATCH: CUresult = CUresult::Err( + CUerror::SYSTEM_DRIVER_MISMATCH, + ); + const ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: CUresult = CUresult::Err( + CUerror::COMPAT_NOT_SUPPORTED_ON_DEVICE, + ); + const ERROR_MPS_CONNECTION_FAILED: CUresult = CUresult::Err( + CUerror::MPS_CONNECTION_FAILED, + ); + const ERROR_MPS_RPC_FAILURE: CUresult = CUresult::Err(CUerror::MPS_RPC_FAILURE); + const ERROR_MPS_SERVER_NOT_READY: CUresult = CUresult::Err( + CUerror::MPS_SERVER_NOT_READY, + ); + const ERROR_MPS_MAX_CLIENTS_REACHED: CUresult = CUresult::Err( + CUerror::MPS_MAX_CLIENTS_REACHED, + ); + const ERROR_MPS_MAX_CONNECTIONS_REACHED: CUresult = CUresult::Err( + CUerror::MPS_MAX_CONNECTIONS_REACHED, + ); + const ERROR_MPS_CLIENT_TERMINATED: CUresult = CUresult::Err( + CUerror::MPS_CLIENT_TERMINATED, + ); + const ERROR_CDP_NOT_SUPPORTED: CUresult = CUresult::Err(CUerror::CDP_NOT_SUPPORTED); + const ERROR_CDP_VERSION_MISMATCH: CUresult = CUresult::Err( + CUerror::CDP_VERSION_MISMATCH, + ); + const ERROR_STREAM_CAPTURE_UNSUPPORTED: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_UNSUPPORTED, + ); + const ERROR_STREAM_CAPTURE_INVALIDATED: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_INVALIDATED, + ); + const ERROR_STREAM_CAPTURE_MERGE: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_MERGE, + ); + const ERROR_STREAM_CAPTURE_UNMATCHED: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_UNMATCHED, + ); + const ERROR_STREAM_CAPTURE_UNJOINED: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_UNJOINED, + ); + const ERROR_STREAM_CAPTURE_ISOLATION: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_ISOLATION, + ); + const ERROR_STREAM_CAPTURE_IMPLICIT: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_IMPLICIT, + ); + const ERROR_CAPTURED_EVENT: CUresult = CUresult::Err(CUerror::CAPTURED_EVENT); + const ERROR_STREAM_CAPTURE_WRONG_THREAD: CUresult = CUresult::Err( + CUerror::STREAM_CAPTURE_WRONG_THREAD, + ); + const ERROR_TIMEOUT: CUresult = CUresult::Err(CUerror::TIMEOUT); + const ERROR_GRAPH_EXEC_UPDATE_FAILURE: CUresult = CUresult::Err( + CUerror::GRAPH_EXEC_UPDATE_FAILURE, + ); + const ERROR_EXTERNAL_DEVICE: CUresult = CUresult::Err(CUerror::EXTERNAL_DEVICE); + const ERROR_INVALID_CLUSTER_SIZE: CUresult = CUresult::Err( + CUerror::INVALID_CLUSTER_SIZE, + ); + const ERROR_FUNCTION_NOT_LOADED: CUresult = CUresult::Err( + CUerror::FUNCTION_NOT_LOADED, + ); + const ERROR_INVALID_RESOURCE_TYPE: CUresult = CUresult::Err( + CUerror::INVALID_RESOURCE_TYPE, + ); + const ERROR_INVALID_RESOURCE_CONFIGURATION: CUresult = CUresult::Err( + CUerror::INVALID_RESOURCE_CONFIGURATION, + ); + const ERROR_UNKNOWN: CUresult = CUresult::Err(CUerror::UNKNOWN); +} +impl CUresultConsts for CUresult {} +#[must_use] +pub type CUresult = ::core::result::Result<(), CUerror>; +const _: fn() = || { + let _ = std::mem::transmute::<CUresult, u32>; +}; +impl From<hip_runtime_sys::hipErrorCode_t> for CUerror { + fn from(error: hip_runtime_sys::hipErrorCode_t) -> Self { + Self(error.0) + } +} +unsafe impl Send for CUdeviceptr {} +unsafe impl Sync 
for CUdeviceptr {} +unsafe impl Send for CUcontext {} +unsafe impl Sync for CUcontext {} +unsafe impl Send for CUstream {} +unsafe impl Sync for CUstream {} +unsafe impl Send for CUmodule {} +unsafe impl Sync for CUmodule {} +unsafe impl Send for CUfunction {} +unsafe impl Sync for CUfunction {} +unsafe impl Send for CUlibrary {} +unsafe impl Sync for CUlibrary {} diff --git a/ext/hip_runtime-sys/README b/ext/hip_runtime-sys/README deleted file mode 100644 index d80b30aa..00000000 --- a/ext/hip_runtime-sys/README +++ /dev/null @@ -1 +0,0 @@ -bindgen --rust-target 1.77 /opt/rocm/include/hip/hip_runtime_api.h -o hip_runtime_api.rs --no-layout-tests --default-enum-style=newtype --allowlist-function "hip.*" --allowlist-type "hip.*" --no-derive-debug --must-use-type hipError_t --new-type-alias "^hipDeviceptr_t$" --allowlist-var "^hip.*$" -- -I/opt/rocm/include -D__HIP_PLATFORM_AMD__ diff --git a/ext/hip_runtime-sys/include/hip_runtime_api.h b/ext/hip_runtime-sys/include/hip_runtime_api.h deleted file mode 100644 index 173daee2..00000000 --- a/ext/hip_runtime-sys/include/hip_runtime_api.h +++ /dev/null @@ -1,2 +0,0 @@ -#define __HIP_PLATFORM_HCC__ -#include <hip/hip_runtime_api.h> \ No newline at end of file diff --git a/ext/hip_runtime-sys/src/hip_runtime_api.rs b/ext/hip_runtime-sys/src/hip_runtime_api.rs deleted file mode 100644 index 56d8557d..00000000 --- a/ext/hip_runtime-sys/src/hip_runtime_api.rs +++ /dev/null @@ -1,7422 +0,0 @@ -/* automatically generated by rust-bindgen 0.70.1 */ - -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct __BindgenBitfieldUnit<Storage> { - storage: Storage, -} -impl<Storage> __BindgenBitfieldUnit<Storage> { - #[inline] - pub const fn new(storage: Storage) -> Self { - Self { storage } - } -} -impl<Storage> __BindgenBitfieldUnit<Storage> -where - Storage: AsRef<[u8]> + AsMut<[u8]>, -{ - #[inline] - pub fn get_bit(&self, index: usize) -> bool { - debug_assert!(index / 8 < self.storage.as_ref().len()); - let byte_index = index / 8; - let byte = self.storage.as_ref()[byte_index]; - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - let mask = 1 << bit_index; - byte & mask == mask - } - #[inline] - pub fn set_bit(&mut self, index: usize, val: bool) { - debug_assert!(index / 8 < self.storage.as_ref().len()); - let byte_index = index / 8; - let byte = &mut self.storage.as_mut()[byte_index]; - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - let mask = 1 << bit_index; - if val { - *byte |= mask; - } else { - *byte &= !mask; - } - } - #[inline] - pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; - } - } - val - } - #[inline] - pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - self.set_bit(index + 
bit_offset, val_bit_is_set); - } - } -} -pub const hipTextureType1D: u32 = 1; -pub const hipTextureType2D: u32 = 2; -pub const hipTextureType3D: u32 = 3; -pub const hipTextureTypeCubemap: u32 = 12; -pub const hipTextureType1DLayered: u32 = 241; -pub const hipTextureType2DLayered: u32 = 242; -pub const hipTextureTypeCubemapLayered: u32 = 252; -pub const hipIpcMemLazyEnablePeerAccess: u32 = 1; -pub const hipStreamDefault: u32 = 0; -pub const hipStreamNonBlocking: u32 = 1; -pub const hipEventDefault: u32 = 0; -pub const hipEventBlockingSync: u32 = 1; -pub const hipEventDisableTiming: u32 = 2; -pub const hipEventInterprocess: u32 = 4; -pub const hipEventDisableSystemFence: u32 = 536870912; -pub const hipEventReleaseToDevice: u32 = 1073741824; -pub const hipEventReleaseToSystem: u32 = 2147483648; -pub const hipHostMallocDefault: u32 = 0; -pub const hipHostMallocPortable: u32 = 1; -pub const hipHostMallocMapped: u32 = 2; -pub const hipHostMallocWriteCombined: u32 = 4; -pub const hipHostMallocNumaUser: u32 = 536870912; -pub const hipHostMallocCoherent: u32 = 1073741824; -pub const hipHostMallocNonCoherent: u32 = 2147483648; -pub const hipMemAttachGlobal: u32 = 1; -pub const hipMemAttachHost: u32 = 2; -pub const hipMemAttachSingle: u32 = 4; -pub const hipDeviceMallocDefault: u32 = 0; -pub const hipDeviceMallocFinegrained: u32 = 1; -pub const hipMallocSignalMemory: u32 = 2; -pub const hipDeviceMallocUncached: u32 = 3; -pub const hipHostRegisterDefault: u32 = 0; -pub const hipHostRegisterPortable: u32 = 1; -pub const hipHostRegisterMapped: u32 = 2; -pub const hipHostRegisterIoMemory: u32 = 4; -pub const hipHostRegisterReadOnly: u32 = 8; -pub const hipExtHostRegisterCoarseGrained: u32 = 8; -pub const hipDeviceScheduleAuto: u32 = 0; -pub const hipDeviceScheduleSpin: u32 = 1; -pub const hipDeviceScheduleYield: u32 = 2; -pub const hipDeviceScheduleBlockingSync: u32 = 4; -pub const hipDeviceScheduleMask: u32 = 7; -pub const hipDeviceMapHost: u32 = 8; -pub const hipDeviceLmemResizeToMax: u32 = 16; -pub const hipArrayDefault: u32 = 0; -pub const hipArrayLayered: u32 = 1; -pub const hipArraySurfaceLoadStore: u32 = 2; -pub const hipArrayCubemap: u32 = 4; -pub const hipArrayTextureGather: u32 = 8; -pub const hipOccupancyDefault: u32 = 0; -pub const hipOccupancyDisableCachingOverride: u32 = 1; -pub const hipCooperativeLaunchMultiDeviceNoPreSync: u32 = 1; -pub const hipCooperativeLaunchMultiDeviceNoPostSync: u32 = 2; -pub const hipExtAnyOrderLaunch: u32 = 1; -pub const hipStreamWaitValueGte: u32 = 0; -pub const hipStreamWaitValueEq: u32 = 1; -pub const hipStreamWaitValueAnd: u32 = 2; -pub const hipStreamWaitValueNor: u32 = 3; -pub const hipExternalMemoryDedicated: u32 = 1; -#[doc = " @defgroup GlobalDefs Global enum and defines\n @{\n\n/\n/**\n hipDeviceArch_t\n"] -#[repr(C)] -#[repr(align(4))] -#[derive(Copy, Clone)] -pub struct hipDeviceArch_t { - pub _bitfield_align_1: [u8; 0], - pub _bitfield_1: __BindgenBitfieldUnit<[u8; 3usize]>, - pub __bindgen_padding_0: u8, -} -impl hipDeviceArch_t { - #[inline] - pub fn hasGlobalInt32Atomics(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasGlobalInt32Atomics(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasGlobalFloatAtomicExch(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } - } - 
#[inline] - pub fn set_hasGlobalFloatAtomicExch(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasSharedInt32Atomics(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasSharedInt32Atomics(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasSharedFloatAtomicExch(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasSharedFloatAtomicExch(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasFloatAtomicAdd(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasFloatAtomicAdd(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasGlobalInt64Atomics(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasGlobalInt64Atomics(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasSharedInt64Atomics(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasSharedInt64Atomics(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasDoubles(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasDoubles(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasWarpVote(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasWarpVote(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasWarpBallot(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasWarpBallot(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasWarpShuffle(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasWarpShuffle(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasFunnelShift(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u32) } - } - #[inline] - pub fn 
set_hasFunnelShift(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasThreadFenceSystem(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasThreadFenceSystem(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasSyncThreadsExt(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasSyncThreadsExt(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(13usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasSurfaceFuncs(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasSurfaceFuncs(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 1u8, val as u64) - } - } - #[inline] - pub fn has3dGrid(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u32) } - } - #[inline] - pub fn set_has3dGrid(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(15usize, 1u8, val as u64) - } - } - #[inline] - pub fn hasDynamicParallelism(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 1u8) as u32) } - } - #[inline] - pub fn set_hasDynamicParallelism(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 1u8, val as u64) - } - } - #[inline] - pub fn new_bitfield_1( - hasGlobalInt32Atomics: ::std::os::raw::c_uint, - hasGlobalFloatAtomicExch: ::std::os::raw::c_uint, - hasSharedInt32Atomics: ::std::os::raw::c_uint, - hasSharedFloatAtomicExch: ::std::os::raw::c_uint, - hasFloatAtomicAdd: ::std::os::raw::c_uint, - hasGlobalInt64Atomics: ::std::os::raw::c_uint, - hasSharedInt64Atomics: ::std::os::raw::c_uint, - hasDoubles: ::std::os::raw::c_uint, - hasWarpVote: ::std::os::raw::c_uint, - hasWarpBallot: ::std::os::raw::c_uint, - hasWarpShuffle: ::std::os::raw::c_uint, - hasFunnelShift: ::std::os::raw::c_uint, - hasThreadFenceSystem: ::std::os::raw::c_uint, - hasSyncThreadsExt: ::std::os::raw::c_uint, - hasSurfaceFuncs: ::std::os::raw::c_uint, - has3dGrid: ::std::os::raw::c_uint, - hasDynamicParallelism: ::std::os::raw::c_uint, - ) -> __BindgenBitfieldUnit<[u8; 3usize]> { - let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); - __bindgen_bitfield_unit.set(0usize, 1u8, { - let hasGlobalInt32Atomics: u32 = - unsafe { ::std::mem::transmute(hasGlobalInt32Atomics) }; - hasGlobalInt32Atomics as u64 - }); - __bindgen_bitfield_unit.set(1usize, 1u8, { - let hasGlobalFloatAtomicExch: u32 = - unsafe { ::std::mem::transmute(hasGlobalFloatAtomicExch) }; - hasGlobalFloatAtomicExch as u64 - }); - __bindgen_bitfield_unit.set(2usize, 1u8, { - let hasSharedInt32Atomics: u32 = - unsafe { ::std::mem::transmute(hasSharedInt32Atomics) }; - hasSharedInt32Atomics as u64 - }); - __bindgen_bitfield_unit.set(3usize, 1u8, { - let hasSharedFloatAtomicExch: u32 = - unsafe { 
::std::mem::transmute(hasSharedFloatAtomicExch) }; - hasSharedFloatAtomicExch as u64 - }); - __bindgen_bitfield_unit.set(4usize, 1u8, { - let hasFloatAtomicAdd: u32 = unsafe { ::std::mem::transmute(hasFloatAtomicAdd) }; - hasFloatAtomicAdd as u64 - }); - __bindgen_bitfield_unit.set(5usize, 1u8, { - let hasGlobalInt64Atomics: u32 = - unsafe { ::std::mem::transmute(hasGlobalInt64Atomics) }; - hasGlobalInt64Atomics as u64 - }); - __bindgen_bitfield_unit.set(6usize, 1u8, { - let hasSharedInt64Atomics: u32 = - unsafe { ::std::mem::transmute(hasSharedInt64Atomics) }; - hasSharedInt64Atomics as u64 - }); - __bindgen_bitfield_unit.set(7usize, 1u8, { - let hasDoubles: u32 = unsafe { ::std::mem::transmute(hasDoubles) }; - hasDoubles as u64 - }); - __bindgen_bitfield_unit.set(8usize, 1u8, { - let hasWarpVote: u32 = unsafe { ::std::mem::transmute(hasWarpVote) }; - hasWarpVote as u64 - }); - __bindgen_bitfield_unit.set(9usize, 1u8, { - let hasWarpBallot: u32 = unsafe { ::std::mem::transmute(hasWarpBallot) }; - hasWarpBallot as u64 - }); - __bindgen_bitfield_unit.set(10usize, 1u8, { - let hasWarpShuffle: u32 = unsafe { ::std::mem::transmute(hasWarpShuffle) }; - hasWarpShuffle as u64 - }); - __bindgen_bitfield_unit.set(11usize, 1u8, { - let hasFunnelShift: u32 = unsafe { ::std::mem::transmute(hasFunnelShift) }; - hasFunnelShift as u64 - }); - __bindgen_bitfield_unit.set(12usize, 1u8, { - let hasThreadFenceSystem: u32 = unsafe { ::std::mem::transmute(hasThreadFenceSystem) }; - hasThreadFenceSystem as u64 - }); - __bindgen_bitfield_unit.set(13usize, 1u8, { - let hasSyncThreadsExt: u32 = unsafe { ::std::mem::transmute(hasSyncThreadsExt) }; - hasSyncThreadsExt as u64 - }); - __bindgen_bitfield_unit.set(14usize, 1u8, { - let hasSurfaceFuncs: u32 = unsafe { ::std::mem::transmute(hasSurfaceFuncs) }; - hasSurfaceFuncs as u64 - }); - __bindgen_bitfield_unit.set(15usize, 1u8, { - let has3dGrid: u32 = unsafe { ::std::mem::transmute(has3dGrid) }; - has3dGrid as u64 - }); - __bindgen_bitfield_unit.set(16usize, 1u8, { - let hasDynamicParallelism: u32 = - unsafe { ::std::mem::transmute(hasDynamicParallelism) }; - hasDynamicParallelism as u64 - }); - __bindgen_bitfield_unit - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipUUID_t { - pub bytes: [::std::os::raw::c_char; 16usize], -} -pub type hipUUID = hipUUID_t; -#[doc = " hipDeviceProp\n"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipDeviceProp_tR0600 { - #[doc = "< Device name."] - pub name: [::std::os::raw::c_char; 256usize], - #[doc = "< UUID of a device"] - pub uuid: hipUUID, - #[doc = "< 8-byte unique identifier. 
Only valid on windows"] - pub luid: [::std::os::raw::c_char; 8usize], - #[doc = "< LUID node mask"] - pub luidDeviceNodeMask: ::std::os::raw::c_uint, - #[doc = "< Size of global memory region (in bytes)."] - pub totalGlobalMem: usize, - #[doc = "< Size of shared memory region (in bytes)."] - pub sharedMemPerBlock: usize, - #[doc = "< Registers per block."] - pub regsPerBlock: ::std::os::raw::c_int, - #[doc = "< Warp size."] - pub warpSize: ::std::os::raw::c_int, - #[doc = "< Maximum pitch in bytes allowed by memory copies\n< pitched memory"] - pub memPitch: usize, - #[doc = "< Max work items per work group or workgroup max size."] - pub maxThreadsPerBlock: ::std::os::raw::c_int, - #[doc = "< Max number of threads in each dimension (XYZ) of a block."] - pub maxThreadsDim: [::std::os::raw::c_int; 3usize], - #[doc = "< Max grid dimensions (XYZ)."] - pub maxGridSize: [::std::os::raw::c_int; 3usize], - #[doc = "< Max clock frequency of the multiProcessors in khz."] - pub clockRate: ::std::os::raw::c_int, - #[doc = "< Size of shared memory region (in bytes)."] - pub totalConstMem: usize, - #[doc = "< Major compute capability. On HCC, this is an approximation and features may\n< differ from CUDA CC. See the arch feature flags for portable ways to query\n< feature caps."] - pub major: ::std::os::raw::c_int, - #[doc = "< Minor compute capability. On HCC, this is an approximation and features may\n< differ from CUDA CC. See the arch feature flags for portable ways to query\n< feature caps."] - pub minor: ::std::os::raw::c_int, - #[doc = "< Alignment requirement for textures"] - pub textureAlignment: usize, - #[doc = "< Pitch alignment requirement for texture references bound to"] - pub texturePitchAlignment: usize, - #[doc = "< Deprecated. Use asyncEngineCount instead"] - pub deviceOverlap: ::std::os::raw::c_int, - #[doc = "< Number of multi-processors (compute units)."] - pub multiProcessorCount: ::std::os::raw::c_int, - #[doc = "< Run time limit for kernels executed on the device"] - pub kernelExecTimeoutEnabled: ::std::os::raw::c_int, - #[doc = "< APU vs dGPU"] - pub integrated: ::std::os::raw::c_int, - #[doc = "< Check whether HIP can map host memory"] - pub canMapHostMemory: ::std::os::raw::c_int, - #[doc = "< Compute mode."] - pub computeMode: ::std::os::raw::c_int, - #[doc = "< Maximum number of elements in 1D images"] - pub maxTexture1D: ::std::os::raw::c_int, - #[doc = "< Maximum 1D mipmap texture size"] - pub maxTexture1DMipmap: ::std::os::raw::c_int, - #[doc = "< Maximum size for 1D textures bound to linear memory"] - pub maxTexture1DLinear: ::std::os::raw::c_int, - #[doc = "< Maximum dimensions (width, height) of 2D images, in image elements"] - pub maxTexture2D: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum number of elements in 2D array mipmap of images"] - pub maxTexture2DMipmap: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 2D tex dimensions if tex are bound to pitched memory"] - pub maxTexture2DLinear: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum 2D tex dimensions if gather has to be performed"] - pub maxTexture2DGather: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum dimensions (width, height, depth) of 3D images, in image\n< elements"] - pub maxTexture3D: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum alternate 3D texture dims"] - pub maxTexture3DAlt: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum cubemap texture dims"] - pub maxTextureCubemap: ::std::os::raw::c_int, - #[doc = "< Maximum number of elements in 1D array images"] - pub 
maxTexture1DLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum number of elements in 2D array images"] - pub maxTexture2DLayered: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum cubemaps layered texture dims"] - pub maxTextureCubemapLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 1D surface size"] - pub maxSurface1D: ::std::os::raw::c_int, - #[doc = "< Maximum 2D surface size"] - pub maxSurface2D: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 3D surface size"] - pub maxSurface3D: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum 1D layered surface size"] - pub maxSurface1DLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 2D layared surface size"] - pub maxSurface2DLayered: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum cubemap surface size"] - pub maxSurfaceCubemap: ::std::os::raw::c_int, - #[doc = "< Maximum cubemap layered surface size"] - pub maxSurfaceCubemapLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Alignment requirement for surface"] - pub surfaceAlignment: usize, - #[doc = "< Device can possibly execute multiple kernels concurrently."] - pub concurrentKernels: ::std::os::raw::c_int, - #[doc = "< Device has ECC support enabled"] - pub ECCEnabled: ::std::os::raw::c_int, - #[doc = "< PCI Bus ID."] - pub pciBusID: ::std::os::raw::c_int, - #[doc = "< PCI Device ID."] - pub pciDeviceID: ::std::os::raw::c_int, - #[doc = "< PCI Domain ID"] - pub pciDomainID: ::std::os::raw::c_int, - #[doc = "< 1:If device is Tesla device using TCC driver, else 0"] - pub tccDriver: ::std::os::raw::c_int, - #[doc = "< Number of async engines"] - pub asyncEngineCount: ::std::os::raw::c_int, - #[doc = "< Does device and host share unified address space"] - pub unifiedAddressing: ::std::os::raw::c_int, - #[doc = "< Max global memory clock frequency in khz."] - pub memoryClockRate: ::std::os::raw::c_int, - #[doc = "< Global memory bus width in bits."] - pub memoryBusWidth: ::std::os::raw::c_int, - #[doc = "< L2 cache size."] - pub l2CacheSize: ::std::os::raw::c_int, - #[doc = "< Device's max L2 persisting lines in bytes"] - pub persistingL2CacheMaxSize: ::std::os::raw::c_int, - #[doc = "< Maximum resident threads per multi-processor."] - pub maxThreadsPerMultiProcessor: ::std::os::raw::c_int, - #[doc = "< Device supports stream priority"] - pub streamPrioritiesSupported: ::std::os::raw::c_int, - #[doc = "< Indicates globals are cached in L1"] - pub globalL1CacheSupported: ::std::os::raw::c_int, - #[doc = "< Locals are cahced in L1"] - pub localL1CacheSupported: ::std::os::raw::c_int, - #[doc = "< Amount of shared memory available per multiprocessor."] - pub sharedMemPerMultiprocessor: usize, - #[doc = "< registers available per multiprocessor"] - pub regsPerMultiprocessor: ::std::os::raw::c_int, - #[doc = "< Device supports allocating managed memory on this system"] - pub managedMemory: ::std::os::raw::c_int, - #[doc = "< 1 if device is on a multi-GPU board, 0 if not."] - pub isMultiGpuBoard: ::std::os::raw::c_int, - #[doc = "< Unique identifier for a group of devices on same multiboard GPU"] - pub multiGpuBoardGroupID: ::std::os::raw::c_int, - #[doc = "< Link between host and device supports native atomics"] - pub hostNativeAtomicSupported: ::std::os::raw::c_int, - #[doc = "< Deprecated. 
CUDA only."] - pub singleToDoublePrecisionPerfRatio: ::std::os::raw::c_int, - #[doc = "< Device supports coherently accessing pageable memory\n< without calling hipHostRegister on it"] - pub pageableMemoryAccess: ::std::os::raw::c_int, - #[doc = "< Device can coherently access managed memory concurrently with\n< the CPU"] - pub concurrentManagedAccess: ::std::os::raw::c_int, - #[doc = "< Is compute preemption supported on the device"] - pub computePreemptionSupported: ::std::os::raw::c_int, - #[doc = "< Device can access host registered memory with same\n< address as the host"] - pub canUseHostPointerForRegisteredMem: ::std::os::raw::c_int, - #[doc = "< HIP device supports cooperative launch"] - pub cooperativeLaunch: ::std::os::raw::c_int, - #[doc = "< HIP device supports cooperative launch on multiple\n< devices"] - pub cooperativeMultiDeviceLaunch: ::std::os::raw::c_int, - #[doc = "< Per device m ax shared mem per block usable by special opt in"] - pub sharedMemPerBlockOptin: usize, - #[doc = "< Device accesses pageable memory via the host's\n< page tables"] - pub pageableMemoryAccessUsesHostPageTables: ::std::os::raw::c_int, - #[doc = "< Host can directly access managed memory on the device\n< without migration"] - pub directManagedMemAccessFromHost: ::std::os::raw::c_int, - #[doc = "< Max number of blocks on CU"] - pub maxBlocksPerMultiProcessor: ::std::os::raw::c_int, - #[doc = "< Max value of access policy window"] - pub accessPolicyMaxWindowSize: ::std::os::raw::c_int, - #[doc = "< Shared memory reserved by driver per block"] - pub reservedSharedMemPerBlock: usize, - #[doc = "< Device supports hipHostRegister"] - pub hostRegisterSupported: ::std::os::raw::c_int, - #[doc = "< Indicates if device supports sparse hip arrays"] - pub sparseHipArraySupported: ::std::os::raw::c_int, - #[doc = "< Device supports using the hipHostRegisterReadOnly flag\n< with hipHostRegistger"] - pub hostRegisterReadOnlySupported: ::std::os::raw::c_int, - #[doc = "< Indicates external timeline semaphore support"] - pub timelineSemaphoreInteropSupported: ::std::os::raw::c_int, - #[doc = "< Indicates if device supports hipMallocAsync and hipMemPool APIs"] - pub memoryPoolsSupported: ::std::os::raw::c_int, - #[doc = "< Indicates device support of RDMA APIs"] - pub gpuDirectRDMASupported: ::std::os::raw::c_int, - #[doc = "< Bitmask to be interpreted according to\n< hipFlushGPUDirectRDMAWritesOptions"] - pub gpuDirectRDMAFlushWritesOptions: ::std::os::raw::c_uint, - #[doc = "< value of hipGPUDirectRDMAWritesOrdering"] - pub gpuDirectRDMAWritesOrdering: ::std::os::raw::c_int, - #[doc = "< Bitmask of handle types support with mempool based IPC"] - pub memoryPoolSupportedHandleTypes: ::std::os::raw::c_uint, - #[doc = "< Device supports deferred mapping HIP arrays and HIP\n< mipmapped arrays"] - pub deferredMappingHipArraySupported: ::std::os::raw::c_int, - #[doc = "< Device supports IPC events"] - pub ipcEventSupported: ::std::os::raw::c_int, - #[doc = "< Device supports cluster launch"] - pub clusterLaunch: ::std::os::raw::c_int, - #[doc = "< Indicates device supports unified function pointers"] - pub unifiedFunctionPointers: ::std::os::raw::c_int, - #[doc = "< CUDA Reserved."] - pub reserved: [::std::os::raw::c_int; 63usize], - #[doc = "< Reserved for adding new entries for HIP/CUDA."] - pub hipReserved: [::std::os::raw::c_int; 32usize], - #[doc = "< AMD GCN Arch Name. HIP Only."] - pub gcnArchName: [::std::os::raw::c_char; 256usize], - #[doc = "< Maximum Shared Memory Per CU. 
HIP Only."] - pub maxSharedMemoryPerMultiProcessor: usize, - #[doc = "< Frequency in khz of the timer used by the device-side \"clock*\"\n< instructions. New for HIP."] - pub clockInstructionRate: ::std::os::raw::c_int, - #[doc = "< Architectural feature flags. New for HIP."] - pub arch: hipDeviceArch_t, - #[doc = "< Addres of HDP_MEM_COHERENCY_FLUSH_CNTL register"] - pub hdpMemFlushCntl: *mut ::std::os::raw::c_uint, - #[doc = "< Addres of HDP_REG_COHERENCY_FLUSH_CNTL register"] - pub hdpRegFlushCntl: *mut ::std::os::raw::c_uint, - #[doc = "< HIP device supports cooperative launch on\n< multiple"] - pub cooperativeMultiDeviceUnmatchedFunc: ::std::os::raw::c_int, - #[doc = "< HIP device supports cooperative launch on\n< multiple"] - pub cooperativeMultiDeviceUnmatchedGridDim: ::std::os::raw::c_int, - #[doc = "< HIP device supports cooperative launch on\n< multiple"] - pub cooperativeMultiDeviceUnmatchedBlockDim: ::std::os::raw::c_int, - #[doc = "< HIP device supports cooperative launch on\n< multiple"] - pub cooperativeMultiDeviceUnmatchedSharedMem: ::std::os::raw::c_int, - #[doc = "< 1: if it is a large PCI bar device, else 0"] - pub isLargeBar: ::std::os::raw::c_int, - #[doc = "< Revision of the GPU in this device"] - pub asicRevision: ::std::os::raw::c_int, -} -impl hipMemoryType { - #[doc = "< Unregistered memory"] - pub const hipMemoryTypeUnregistered: hipMemoryType = hipMemoryType(0); -} -impl hipMemoryType { - #[doc = "< Memory is physically located on host"] - pub const hipMemoryTypeHost: hipMemoryType = hipMemoryType(1); -} -impl hipMemoryType { - #[doc = "< Memory is physically located on device. (see deviceId for\n< specific device)"] - pub const hipMemoryTypeDevice: hipMemoryType = hipMemoryType(2); -} -impl hipMemoryType { - #[doc = "< Managed memory, automaticallly managed by the unified\n< memory system\n< place holder for new values."] - pub const hipMemoryTypeManaged: hipMemoryType = hipMemoryType(3); -} -impl hipMemoryType { - #[doc = "< Array memory, physically located on device. 
(see deviceId for\n< specific device)"] - pub const hipMemoryTypeArray: hipMemoryType = hipMemoryType(10); -} -impl hipMemoryType { - #[doc = "< unified address space"] - pub const hipMemoryTypeUnified: hipMemoryType = hipMemoryType(11); -} -#[repr(transparent)] -#[doc = " hipMemoryType (for pointer attributes)\n\n @note hipMemoryType enum values are combination of cudaMemoryType and cuMemoryType and AMD specific enum values.\n"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemoryType(pub ::std::os::raw::c_uint); -#[doc = " Pointer attributes"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipPointerAttribute_t { - pub type_: hipMemoryType, - pub device: ::std::os::raw::c_int, - pub devicePointer: *mut ::std::os::raw::c_void, - pub hostPointer: *mut ::std::os::raw::c_void, - pub isManaged: ::std::os::raw::c_int, - pub allocationFlags: ::std::os::raw::c_uint, -} -impl hipErrorCode_t { - #[doc = "< Successful completion."] - pub const hipSuccess: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(0)}); -} -impl hipErrorCode_t { - #[doc = "< One or more of the parameters passed to the API call is NULL\n< or not in an acceptable range."] - pub const hipErrorInvalidValue: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(1)}); -} -impl hipErrorCode_t { - #[doc = "< out of memory range."] - pub const hipErrorOutOfMemory: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(2)}); -} -impl hipErrorCode_t { - #[doc = "< Memory allocation error."] - pub const hipErrorMemoryAllocation: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(2)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid not initialized"] - pub const hipErrorNotInitialized: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(3)}); -} -impl hipErrorCode_t { - pub const hipErrorInitializationError: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(3)}); -} -impl hipErrorCode_t { - #[doc = "< Deinitialized"] - pub const hipErrorDeinitialized: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(4)}); -} -impl hipErrorCode_t { - pub const hipErrorProfilerDisabled: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(5)}); -} -impl hipErrorCode_t { - pub const hipErrorProfilerNotInitialized: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(6)}); -} -impl hipErrorCode_t { - pub const hipErrorProfilerAlreadyStarted: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(7)}); -} -impl hipErrorCode_t { - pub const hipErrorProfilerAlreadyStopped: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(8)}); -} -impl hipErrorCode_t { - #[doc = "< Invalide configuration"] - pub const hipErrorInvalidConfiguration: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(9)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid pitch value"] - pub const hipErrorInvalidPitchValue: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(12)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid symbol"] - pub const hipErrorInvalidSymbol: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(13)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid Device Pointer"] - pub const hipErrorInvalidDevicePointer: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(17)}); -} -impl hipErrorCode_t { 
- #[doc = "< Invalid memory copy direction"] - pub const hipErrorInvalidMemcpyDirection: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(21)}); -} -impl hipErrorCode_t { - pub const hipErrorInsufficientDriver: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(35)}); -} -impl hipErrorCode_t { - pub const hipErrorMissingConfiguration: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(52)}); -} -impl hipErrorCode_t { - pub const hipErrorPriorLaunchFailure: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(53)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid device function"] - pub const hipErrorInvalidDeviceFunction: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(98)}); -} -impl hipErrorCode_t { - #[doc = "< Call to hipGetDeviceCount returned 0 devices"] - pub const hipErrorNoDevice: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(100)}); -} -impl hipErrorCode_t { - #[doc = "< DeviceID must be in range from 0 to compute-devices."] - pub const hipErrorInvalidDevice: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(101)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid image"] - pub const hipErrorInvalidImage: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(200)}); -} -impl hipErrorCode_t { - #[doc = "< Produced when input context is invalid."] - pub const hipErrorInvalidContext: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(201)}); -} -impl hipErrorCode_t { - pub const hipErrorContextAlreadyCurrent: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(202)}); -} -impl hipErrorCode_t { - pub const hipErrorMapFailed: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(205)}); -} -impl hipErrorCode_t { - #[doc = "< Produced when the IPC memory attach failed from ROCr."] - pub const hipErrorMapBufferObjectFailed: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(205)}); -} -impl hipErrorCode_t { - pub const hipErrorUnmapFailed: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(206)}); -} -impl hipErrorCode_t { - pub const hipErrorArrayIsMapped: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(207)}); -} -impl hipErrorCode_t { - pub const hipErrorAlreadyMapped: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(208)}); -} -impl hipErrorCode_t { - pub const hipErrorNoBinaryForGpu: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(209)}); -} -impl hipErrorCode_t { - pub const hipErrorAlreadyAcquired: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(210)}); -} -impl hipErrorCode_t { - pub const hipErrorNotMapped: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(211)}); -} -impl hipErrorCode_t { - pub const hipErrorNotMappedAsArray: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(212)}); -} -impl hipErrorCode_t { - pub const hipErrorNotMappedAsPointer: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(213)}); -} -impl hipErrorCode_t { - pub const hipErrorECCNotCorrectable: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(214)}); -} -impl hipErrorCode_t { - #[doc = "< Unsupported limit"] - pub const hipErrorUnsupportedLimit: 
hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(215)}); -} -impl hipErrorCode_t { - #[doc = "< The context is already in use"] - pub const hipErrorContextAlreadyInUse: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(216)}); -} -impl hipErrorCode_t { - pub const hipErrorPeerAccessUnsupported: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(217)}); -} -impl hipErrorCode_t { - #[doc = "< In CUDA DRV, it is CUDA_ERROR_INVALID_PTX"] - pub const hipErrorInvalidKernelFile: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(218)}); -} -impl hipErrorCode_t { - pub const hipErrorInvalidGraphicsContext: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(219)}); -} -impl hipErrorCode_t { - #[doc = "< Invalid source."] - pub const hipErrorInvalidSource: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(300)}); -} -impl hipErrorCode_t { - #[doc = "< the file is not found."] - pub const hipErrorFileNotFound: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(301)}); -} -impl hipErrorCode_t { - pub const hipErrorSharedObjectSymbolNotFound: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(302)}); -} -impl hipErrorCode_t { - #[doc = "< Failed to initialize shared object."] - pub const hipErrorSharedObjectInitFailed: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(303)}); -} -impl hipErrorCode_t { - #[doc = "< Not the correct operating system"] - pub const hipErrorOperatingSystem: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(304)}); -} -impl hipErrorCode_t { - #[doc = "< Invalide handle"] - pub const hipErrorInvalidHandle: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(400)}); -} -impl hipErrorCode_t { - #[doc = "< Resource handle (hipEvent_t or hipStream_t) invalid."] - pub const hipErrorInvalidResourceHandle: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(400)}); -} -impl hipErrorCode_t { - #[doc = "< Resource required is not in a valid state to perform operation."] - pub const hipErrorIllegalState: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(401)}); -} -impl hipErrorCode_t { - #[doc = "< Not found"] - pub const hipErrorNotFound: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(500)}); -} -impl hipErrorCode_t { - #[doc = "< Indicates that asynchronous operations enqueued earlier are not\n< ready. This is not actually an error, but is used to distinguish\n< from hipSuccess (which indicates completion). 
APIs that return\n< this error include hipEventQuery and hipStreamQuery."] - pub const hipErrorNotReady: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(600)}); -} -impl hipErrorCode_t { - pub const hipErrorIllegalAddress: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(700)}); -} -impl hipErrorCode_t { - #[doc = "< Out of resources error."] - pub const hipErrorLaunchOutOfResources: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(701)}); -} -impl hipErrorCode_t { - #[doc = "< Timeout for the launch."] - pub const hipErrorLaunchTimeOut: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(702)}); -} -impl hipErrorCode_t { - #[doc = "< Peer access was already enabled from the current\n< device."] - pub const hipErrorPeerAccessAlreadyEnabled: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(704)}); -} -impl hipErrorCode_t { - #[doc = "< Peer access was never enabled from the current device."] - pub const hipErrorPeerAccessNotEnabled: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(705)}); -} -impl hipErrorCode_t { - #[doc = "< The process is active."] - pub const hipErrorSetOnActiveProcess: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(708)}); -} -impl hipErrorCode_t { - #[doc = "< The context is already destroyed"] - pub const hipErrorContextIsDestroyed: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(709)}); -} -impl hipErrorCode_t { - #[doc = "< Produced when the kernel calls assert."] - pub const hipErrorAssert: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(710)}); -} -impl hipErrorCode_t { - #[doc = "< Produced when trying to lock a page-locked\n< memory."] - pub const hipErrorHostMemoryAlreadyRegistered: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(712)}); -} -impl hipErrorCode_t { - #[doc = "< Produced when trying to unlock a non-page-locked\n< memory."] - pub const hipErrorHostMemoryNotRegistered: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(713)}); -} -impl hipErrorCode_t { - #[doc = "< An exception occurred on the device while executing a kernel."] - pub const hipErrorLaunchFailure: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(719)}); -} -impl hipErrorCode_t { - #[doc = "< This error indicates that the number of blocks\n< launched per grid for a kernel that was launched\n< via cooperative launch APIs exceeds the maximum\n< number of allowed blocks for the current device."] - pub const hipErrorCooperativeLaunchTooLarge: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(720)}); -} -impl hipErrorCode_t { - #[doc = "< Produced when the hip API is not supported/implemented"] - pub const hipErrorNotSupported: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(801)}); -} -impl hipErrorCode_t { - #[doc = "< The operation is not permitted when the stream\n< is capturing."] - pub const hipErrorStreamCaptureUnsupported: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(900)}); -} -impl hipErrorCode_t { - #[doc = "< The current capture sequence on the stream\n< has been invalidated due to a previous error."] - pub const hipErrorStreamCaptureInvalidated: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(901)}); -} -impl hipErrorCode_t { - 
#[doc = "< The operation would have resulted in a merge of\n< two independent capture sequences."] - pub const hipErrorStreamCaptureMerge: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(902)}); -} -impl hipErrorCode_t { - #[doc = "< The capture was not initiated in this stream."] - pub const hipErrorStreamCaptureUnmatched: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(903)}); -} -impl hipErrorCode_t { - #[doc = "< The capture sequence contains a fork that was not\n< joined to the primary stream."] - pub const hipErrorStreamCaptureUnjoined: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(904)}); -} -impl hipErrorCode_t { - #[doc = "< A dependency would have been created which crosses\n< the capture sequence boundary. Only implicit\n< in-stream ordering dependencies are allowed\n< to cross the boundary"] - pub const hipErrorStreamCaptureIsolation: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(905)}); -} -impl hipErrorCode_t { - #[doc = "< The operation would have resulted in a disallowed\n< implicit dependency on a current capture sequence\n< from hipStreamLegacy."] - pub const hipErrorStreamCaptureImplicit: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(906)}); -} -impl hipErrorCode_t { - #[doc = "< The operation is not permitted on an event which was last\n< recorded in a capturing stream."] - pub const hipErrorCapturedEvent: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(907)}); -} -impl hipErrorCode_t { - #[doc = "< A stream capture sequence not initiated with\n< the hipStreamCaptureModeRelaxed argument to\n< hipStreamBeginCapture was passed to\n< hipStreamEndCapture in a different thread."] - pub const hipErrorStreamCaptureWrongThread: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(908)}); -} -impl hipErrorCode_t { - #[doc = "< This error indicates that the graph update\n< not performed because it included changes which\n< violated constraintsspecific to instantiated graph\n< update."] - pub const hipErrorGraphExecUpdateFailure: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(910)}); -} -impl hipErrorCode_t { - #[doc = "< Unknown error."] - pub const hipErrorUnknown: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(999)}); -} -impl hipErrorCode_t { - #[doc = "< HSA runtime memory call returned error. Typically not seen\n< in production systems."] - pub const hipErrorRuntimeMemory: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(1052)}); -} -impl hipErrorCode_t { - #[doc = "< HSA runtime call other than memory returned error. 
Typically\n< not seen in production systems."] - pub const hipErrorRuntimeOther: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(1053)}); -} -impl hipErrorCode_t { - #[doc = "< Marker that more error codes are needed."] - pub const hipErrorTbd: hipErrorCode_t = hipErrorCode_t(unsafe{::std::num::NonZeroU32::new_unchecked(1054)}); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)] -pub struct hipErrorCode_t(pub ::std::num::NonZeroU32); -#[doc = " HIP error type\n"] -#[must_use] -pub type hipError_t = Result<(), hipErrorCode_t>; -// Size check -const _: fn() = || { - let _ = std::mem::transmute::<hipError_t, u32>; -}; -impl hipDeviceAttribute_t { - pub const hipDeviceAttributeCudaCompatibleBegin: hipDeviceAttribute_t = hipDeviceAttribute_t(0); -} -impl hipDeviceAttribute_t { - #[doc = "< Whether ECC support is enabled."] - pub const hipDeviceAttributeEccEnabled: hipDeviceAttribute_t = hipDeviceAttribute_t(0); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. The maximum size of the window policy in bytes."] - pub const hipDeviceAttributeAccessPolicyMaxWindowSize: hipDeviceAttribute_t = - hipDeviceAttribute_t(1); -} -impl hipDeviceAttribute_t { - #[doc = "< Asynchronous engines number."] - pub const hipDeviceAttributeAsyncEngineCount: hipDeviceAttribute_t = hipDeviceAttribute_t(2); -} -impl hipDeviceAttribute_t { - #[doc = "< Whether host memory can be mapped into device address space"] - pub const hipDeviceAttributeCanMapHostMemory: hipDeviceAttribute_t = hipDeviceAttribute_t(3); -} -impl hipDeviceAttribute_t { - #[doc = "< Device can access host registered memory\n< at the same virtual address as the CPU"] - pub const hipDeviceAttributeCanUseHostPointerForRegisteredMem: hipDeviceAttribute_t = - hipDeviceAttribute_t(4); -} -impl hipDeviceAttribute_t { - #[doc = "< Peak clock frequency in kilohertz."] - pub const hipDeviceAttributeClockRate: hipDeviceAttribute_t = hipDeviceAttribute_t(5); -} -impl hipDeviceAttribute_t { - #[doc = "< Compute mode that device is currently in."] - pub const hipDeviceAttributeComputeMode: hipDeviceAttribute_t = hipDeviceAttribute_t(6); -} -impl hipDeviceAttribute_t { - #[doc = "< Device supports Compute Preemption."] - pub const hipDeviceAttributeComputePreemptionSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(7); -} -impl hipDeviceAttribute_t { - #[doc = "< Device can possibly execute multiple kernels concurrently."] - pub const hipDeviceAttributeConcurrentKernels: hipDeviceAttribute_t = hipDeviceAttribute_t(8); -} -impl hipDeviceAttribute_t { - #[doc = "< Device can coherently access managed memory concurrently with the CPU"] - pub const hipDeviceAttributeConcurrentManagedAccess: hipDeviceAttribute_t = - hipDeviceAttribute_t(9); -} -impl hipDeviceAttribute_t { - #[doc = "< Support cooperative launch"] - pub const hipDeviceAttributeCooperativeLaunch: hipDeviceAttribute_t = hipDeviceAttribute_t(10); -} -impl hipDeviceAttribute_t { - #[doc = "< Support cooperative launch on multiple devices"] - pub const hipDeviceAttributeCooperativeMultiDeviceLaunch: hipDeviceAttribute_t = - hipDeviceAttribute_t(11); -} -impl hipDeviceAttribute_t { - #[doc = "< Device can concurrently copy memory and execute a kernel.\n< Deprecated. 
Use instead asyncEngineCount."] - pub const hipDeviceAttributeDeviceOverlap: hipDeviceAttribute_t = hipDeviceAttribute_t(12); -} -impl hipDeviceAttribute_t { - #[doc = "< Host can directly access managed memory on\n< the device without migration"] - pub const hipDeviceAttributeDirectManagedMemAccessFromHost: hipDeviceAttribute_t = - hipDeviceAttribute_t(13); -} -impl hipDeviceAttribute_t { - #[doc = "< Device supports caching globals in L1"] - pub const hipDeviceAttributeGlobalL1CacheSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(14); -} -impl hipDeviceAttribute_t { - #[doc = "< Link between the device and the host supports native atomic operations"] - pub const hipDeviceAttributeHostNativeAtomicSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(15); -} -impl hipDeviceAttribute_t { - #[doc = "< Device is integrated GPU"] - pub const hipDeviceAttributeIntegrated: hipDeviceAttribute_t = hipDeviceAttribute_t(16); -} -impl hipDeviceAttribute_t { - #[doc = "< Multiple GPU devices."] - pub const hipDeviceAttributeIsMultiGpuBoard: hipDeviceAttribute_t = hipDeviceAttribute_t(17); -} -impl hipDeviceAttribute_t { - #[doc = "< Run time limit for kernels executed on the device"] - pub const hipDeviceAttributeKernelExecTimeout: hipDeviceAttribute_t = hipDeviceAttribute_t(18); -} -impl hipDeviceAttribute_t { - #[doc = "< Size of L2 cache in bytes. 0 if the device doesn't have L2 cache."] - pub const hipDeviceAttributeL2CacheSize: hipDeviceAttribute_t = hipDeviceAttribute_t(19); -} -impl hipDeviceAttribute_t { - #[doc = "< caching locals in L1 is supported"] - pub const hipDeviceAttributeLocalL1CacheSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(20); -} -impl hipDeviceAttribute_t { - #[doc = "< 8-byte locally unique identifier in 8 bytes. Undefined on TCC and non-Windows platforms"] - pub const hipDeviceAttributeLuid: hipDeviceAttribute_t = hipDeviceAttribute_t(21); -} -impl hipDeviceAttribute_t { - #[doc = "< Luid device node mask. 
Undefined on TCC and non-Windows platforms"] - pub const hipDeviceAttributeLuidDeviceNodeMask: hipDeviceAttribute_t = hipDeviceAttribute_t(22); -} -impl hipDeviceAttribute_t { - #[doc = "< Major compute capability version number."] - pub const hipDeviceAttributeComputeCapabilityMajor: hipDeviceAttribute_t = - hipDeviceAttribute_t(23); -} -impl hipDeviceAttribute_t { - #[doc = "< Device supports allocating managed memory on this system"] - pub const hipDeviceAttributeManagedMemory: hipDeviceAttribute_t = hipDeviceAttribute_t(24); -} -impl hipDeviceAttribute_t { - #[doc = "< Max block size per multiprocessor"] - pub const hipDeviceAttributeMaxBlocksPerMultiProcessor: hipDeviceAttribute_t = - hipDeviceAttribute_t(25); -} -impl hipDeviceAttribute_t { - #[doc = "< Max block size in width."] - pub const hipDeviceAttributeMaxBlockDimX: hipDeviceAttribute_t = hipDeviceAttribute_t(26); -} -impl hipDeviceAttribute_t { - #[doc = "< Max block size in height."] - pub const hipDeviceAttributeMaxBlockDimY: hipDeviceAttribute_t = hipDeviceAttribute_t(27); -} -impl hipDeviceAttribute_t { - #[doc = "< Max block size in depth."] - pub const hipDeviceAttributeMaxBlockDimZ: hipDeviceAttribute_t = hipDeviceAttribute_t(28); -} -impl hipDeviceAttribute_t { - #[doc = "< Max grid size in width."] - pub const hipDeviceAttributeMaxGridDimX: hipDeviceAttribute_t = hipDeviceAttribute_t(29); -} -impl hipDeviceAttribute_t { - #[doc = "< Max grid size in height."] - pub const hipDeviceAttributeMaxGridDimY: hipDeviceAttribute_t = hipDeviceAttribute_t(30); -} -impl hipDeviceAttribute_t { - #[doc = "< Max grid size in depth."] - pub const hipDeviceAttributeMaxGridDimZ: hipDeviceAttribute_t = hipDeviceAttribute_t(31); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum size of 1D surface."] - pub const hipDeviceAttributeMaxSurface1D: hipDeviceAttribute_t = hipDeviceAttribute_t(32); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. Maximum dimensions of 1D layered surface."] - pub const hipDeviceAttributeMaxSurface1DLayered: hipDeviceAttribute_t = - hipDeviceAttribute_t(33); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension (width, height) of 2D surface."] - pub const hipDeviceAttributeMaxSurface2D: hipDeviceAttribute_t = hipDeviceAttribute_t(34); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. Maximum dimensions of 2D layered surface."] - pub const hipDeviceAttributeMaxSurface2DLayered: hipDeviceAttribute_t = - hipDeviceAttribute_t(35); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension (width, height, depth) of 3D surface."] - pub const hipDeviceAttributeMaxSurface3D: hipDeviceAttribute_t = hipDeviceAttribute_t(36); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. Maximum dimensions of Cubemap surface."] - pub const hipDeviceAttributeMaxSurfaceCubemap: hipDeviceAttribute_t = hipDeviceAttribute_t(37); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. 
Maximum dimension of Cubemap layered surface."] - pub const hipDeviceAttributeMaxSurfaceCubemapLayered: hipDeviceAttribute_t = - hipDeviceAttribute_t(38); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum size of 1D texture."] - pub const hipDeviceAttributeMaxTexture1DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t(39); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of 1D layered texture."] - pub const hipDeviceAttributeMaxTexture1DLayered: hipDeviceAttribute_t = - hipDeviceAttribute_t(40); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum number of elements allocatable in a 1D linear texture.\n< Use cudaDeviceGetTexture1DLinearMaxWidth() instead on Cuda."] - pub const hipDeviceAttributeMaxTexture1DLinear: hipDeviceAttribute_t = hipDeviceAttribute_t(41); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum size of 1D mipmapped texture."] - pub const hipDeviceAttributeMaxTexture1DMipmap: hipDeviceAttribute_t = hipDeviceAttribute_t(42); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension width of 2D texture."] - pub const hipDeviceAttributeMaxTexture2DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t(43); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension hight of 2D texture."] - pub const hipDeviceAttributeMaxTexture2DHeight: hipDeviceAttribute_t = hipDeviceAttribute_t(44); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of 2D texture if gather operations performed."] - pub const hipDeviceAttributeMaxTexture2DGather: hipDeviceAttribute_t = hipDeviceAttribute_t(45); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of 2D layered texture."] - pub const hipDeviceAttributeMaxTexture2DLayered: hipDeviceAttribute_t = - hipDeviceAttribute_t(46); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions (width, height, pitch) of 2D textures bound to pitched memory."] - pub const hipDeviceAttributeMaxTexture2DLinear: hipDeviceAttribute_t = hipDeviceAttribute_t(47); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of 2D mipmapped texture."] - pub const hipDeviceAttributeMaxTexture2DMipmap: hipDeviceAttribute_t = hipDeviceAttribute_t(48); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension width of 3D texture."] - pub const hipDeviceAttributeMaxTexture3DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t(49); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension height of 3D texture."] - pub const hipDeviceAttributeMaxTexture3DHeight: hipDeviceAttribute_t = hipDeviceAttribute_t(50); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension depth of 3D texture."] - pub const hipDeviceAttributeMaxTexture3DDepth: hipDeviceAttribute_t = hipDeviceAttribute_t(51); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of alternate 3D texture."] - pub const hipDeviceAttributeMaxTexture3DAlt: hipDeviceAttribute_t = hipDeviceAttribute_t(52); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of Cubemap texture"] - pub const hipDeviceAttributeMaxTextureCubemap: hipDeviceAttribute_t = hipDeviceAttribute_t(53); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimensions of Cubemap layered texture."] - pub const hipDeviceAttributeMaxTextureCubemapLayered: hipDeviceAttribute_t = - hipDeviceAttribute_t(54); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum dimension of a block"] - pub const hipDeviceAttributeMaxThreadsDim: hipDeviceAttribute_t = hipDeviceAttribute_t(55); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum number of threads per block."] - pub const 
hipDeviceAttributeMaxThreadsPerBlock: hipDeviceAttribute_t = hipDeviceAttribute_t(56); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum resident threads per multiprocessor."] - pub const hipDeviceAttributeMaxThreadsPerMultiProcessor: hipDeviceAttribute_t = - hipDeviceAttribute_t(57); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum pitch in bytes allowed by memory copies"] - pub const hipDeviceAttributeMaxPitch: hipDeviceAttribute_t = hipDeviceAttribute_t(58); -} -impl hipDeviceAttribute_t { - #[doc = "< Global memory bus width in bits."] - pub const hipDeviceAttributeMemoryBusWidth: hipDeviceAttribute_t = hipDeviceAttribute_t(59); -} -impl hipDeviceAttribute_t { - #[doc = "< Peak memory clock frequency in kilohertz."] - pub const hipDeviceAttributeMemoryClockRate: hipDeviceAttribute_t = hipDeviceAttribute_t(60); -} -impl hipDeviceAttribute_t { - #[doc = "< Minor compute capability version number."] - pub const hipDeviceAttributeComputeCapabilityMinor: hipDeviceAttribute_t = - hipDeviceAttribute_t(61); -} -impl hipDeviceAttribute_t { - #[doc = "< Unique ID of device group on the same multi-GPU board"] - pub const hipDeviceAttributeMultiGpuBoardGroupID: hipDeviceAttribute_t = - hipDeviceAttribute_t(62); -} -impl hipDeviceAttribute_t { - #[doc = "< Number of multiprocessors on the device."] - pub const hipDeviceAttributeMultiprocessorCount: hipDeviceAttribute_t = - hipDeviceAttribute_t(63); -} -impl hipDeviceAttribute_t { - #[doc = "< Previously hipDeviceAttributeName"] - pub const hipDeviceAttributeUnused1: hipDeviceAttribute_t = hipDeviceAttribute_t(64); -} -impl hipDeviceAttribute_t { - #[doc = "< Device supports coherently accessing pageable memory\n< without calling hipHostRegister on it"] - pub const hipDeviceAttributePageableMemoryAccess: hipDeviceAttribute_t = - hipDeviceAttribute_t(65); -} -impl hipDeviceAttribute_t { - #[doc = "< Device accesses pageable memory via the host's page tables"] - pub const hipDeviceAttributePageableMemoryAccessUsesHostPageTables: hipDeviceAttribute_t = - hipDeviceAttribute_t(66); -} -impl hipDeviceAttribute_t { - #[doc = "< PCI Bus ID."] - pub const hipDeviceAttributePciBusId: hipDeviceAttribute_t = hipDeviceAttribute_t(67); -} -impl hipDeviceAttribute_t { - #[doc = "< PCI Device ID."] - pub const hipDeviceAttributePciDeviceId: hipDeviceAttribute_t = hipDeviceAttribute_t(68); -} -impl hipDeviceAttribute_t { - #[doc = "< PCI Domain ID."] - pub const hipDeviceAttributePciDomainID: hipDeviceAttribute_t = hipDeviceAttribute_t(69); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum l2 persisting lines capacity in bytes"] - pub const hipDeviceAttributePersistingL2CacheMaxSize: hipDeviceAttribute_t = - hipDeviceAttribute_t(70); -} -impl hipDeviceAttribute_t { - #[doc = "< 32-bit registers available to a thread block. 
This number is shared\n< by all thread blocks simultaneously resident on a multiprocessor."] - pub const hipDeviceAttributeMaxRegistersPerBlock: hipDeviceAttribute_t = - hipDeviceAttribute_t(71); -} -impl hipDeviceAttribute_t { - #[doc = "< 32-bit registers available per block."] - pub const hipDeviceAttributeMaxRegistersPerMultiprocessor: hipDeviceAttribute_t = - hipDeviceAttribute_t(72); -} -impl hipDeviceAttribute_t { - #[doc = "< Shared memory reserved by CUDA driver per block."] - pub const hipDeviceAttributeReservedSharedMemPerBlock: hipDeviceAttribute_t = - hipDeviceAttribute_t(73); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum shared memory available per block in bytes."] - pub const hipDeviceAttributeMaxSharedMemoryPerBlock: hipDeviceAttribute_t = - hipDeviceAttribute_t(74); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum shared memory per block usable by special opt in."] - pub const hipDeviceAttributeSharedMemPerBlockOptin: hipDeviceAttribute_t = - hipDeviceAttribute_t(75); -} -impl hipDeviceAttribute_t { - #[doc = "< Shared memory available per multiprocessor."] - pub const hipDeviceAttributeSharedMemPerMultiprocessor: hipDeviceAttribute_t = - hipDeviceAttribute_t(76); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. Performance ratio of single precision to double precision."] - pub const hipDeviceAttributeSingleToDoublePrecisionPerfRatio: hipDeviceAttribute_t = - hipDeviceAttribute_t(77); -} -impl hipDeviceAttribute_t { - #[doc = "< Whether to support stream priorities."] - pub const hipDeviceAttributeStreamPrioritiesSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(78); -} -impl hipDeviceAttribute_t { - #[doc = "< Alignment requirement for surfaces"] - pub const hipDeviceAttributeSurfaceAlignment: hipDeviceAttribute_t = hipDeviceAttribute_t(79); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. Whether device is a Tesla device using TCC driver"] - pub const hipDeviceAttributeTccDriver: hipDeviceAttribute_t = hipDeviceAttribute_t(80); -} -impl hipDeviceAttribute_t { - #[doc = "< Alignment requirement for textures"] - pub const hipDeviceAttributeTextureAlignment: hipDeviceAttribute_t = hipDeviceAttribute_t(81); -} -impl hipDeviceAttribute_t { - #[doc = "< Pitch alignment requirement for 2D texture references bound to pitched memory;"] - pub const hipDeviceAttributeTexturePitchAlignment: hipDeviceAttribute_t = - hipDeviceAttribute_t(82); -} -impl hipDeviceAttribute_t { - #[doc = "< Constant memory size in bytes."] - pub const hipDeviceAttributeTotalConstantMemory: hipDeviceAttribute_t = - hipDeviceAttribute_t(83); -} -impl hipDeviceAttribute_t { - #[doc = "< Global memory available on devicice."] - pub const hipDeviceAttributeTotalGlobalMem: hipDeviceAttribute_t = hipDeviceAttribute_t(84); -} -impl hipDeviceAttribute_t { - #[doc = "< Cuda only. 
An unified address space shared with the host."] - pub const hipDeviceAttributeUnifiedAddressing: hipDeviceAttribute_t = hipDeviceAttribute_t(85); -} -impl hipDeviceAttribute_t { - #[doc = "< Previously hipDeviceAttributeUuid"] - pub const hipDeviceAttributeUnused2: hipDeviceAttribute_t = hipDeviceAttribute_t(86); -} -impl hipDeviceAttribute_t { - #[doc = "< Warp size in threads."] - pub const hipDeviceAttributeWarpSize: hipDeviceAttribute_t = hipDeviceAttribute_t(87); -} -impl hipDeviceAttribute_t { - #[doc = "< Device supports HIP Stream Ordered Memory Allocator"] - pub const hipDeviceAttributeMemoryPoolsSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(88); -} -impl hipDeviceAttribute_t { - #[doc = "< Device supports HIP virtual memory management"] - pub const hipDeviceAttributeVirtualMemoryManagementSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(89); -} -impl hipDeviceAttribute_t { - #[doc = "< Can device support host memory registration via hipHostRegister"] - pub const hipDeviceAttributeHostRegisterSupported: hipDeviceAttribute_t = - hipDeviceAttribute_t(90); -} -impl hipDeviceAttribute_t { - #[doc = "< Supported handle mask for HIP Stream Ordered Memory Allocator"] - pub const hipDeviceAttributeMemoryPoolSupportedHandleTypes: hipDeviceAttribute_t = - hipDeviceAttribute_t(91); -} -impl hipDeviceAttribute_t { - pub const hipDeviceAttributeCudaCompatibleEnd: hipDeviceAttribute_t = - hipDeviceAttribute_t(9999); -} -impl hipDeviceAttribute_t { - pub const hipDeviceAttributeAmdSpecificBegin: hipDeviceAttribute_t = - hipDeviceAttribute_t(10000); -} -impl hipDeviceAttribute_t { - #[doc = "< Frequency in khz of the timer used by the device-side \"clock*\""] - pub const hipDeviceAttributeClockInstructionRate: hipDeviceAttribute_t = - hipDeviceAttribute_t(10000); -} -impl hipDeviceAttribute_t { - #[doc = "< Previously hipDeviceAttributeArch"] - pub const hipDeviceAttributeUnused3: hipDeviceAttribute_t = hipDeviceAttribute_t(10001); -} -impl hipDeviceAttribute_t { - #[doc = "< Maximum Shared Memory PerMultiprocessor."] - pub const hipDeviceAttributeMaxSharedMemoryPerMultiprocessor: hipDeviceAttribute_t = - hipDeviceAttribute_t(10002); -} -impl hipDeviceAttribute_t { - #[doc = "< Previously hipDeviceAttributeGcnArch"] - pub const hipDeviceAttributeUnused4: hipDeviceAttribute_t = hipDeviceAttribute_t(10003); -} -impl hipDeviceAttribute_t { - #[doc = "< Previously hipDeviceAttributeGcnArchName"] - pub const hipDeviceAttributeUnused5: hipDeviceAttribute_t = hipDeviceAttribute_t(10004); -} -impl hipDeviceAttribute_t { - #[doc = "< Address of the HDP_MEM_COHERENCY_FLUSH_CNTL register"] - pub const hipDeviceAttributeHdpMemFlushCntl: hipDeviceAttribute_t = hipDeviceAttribute_t(10005); -} -impl hipDeviceAttribute_t { - #[doc = "< Address of the HDP_REG_COHERENCY_FLUSH_CNTL register"] - pub const hipDeviceAttributeHdpRegFlushCntl: hipDeviceAttribute_t = hipDeviceAttribute_t(10006); -} -impl hipDeviceAttribute_t { - #[doc = "< Supports cooperative launch on multiple\n< devices with unmatched functions"] - pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedFunc: hipDeviceAttribute_t = - hipDeviceAttribute_t(10007); -} -impl hipDeviceAttribute_t { - #[doc = "< Supports cooperative launch on multiple\n< devices with unmatched grid dimensions"] - pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedGridDim: hipDeviceAttribute_t = - hipDeviceAttribute_t(10008); -} -impl hipDeviceAttribute_t { - #[doc = "< Supports cooperative launch on multiple\n< devices with unmatched block 
dimensions"] - pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedBlockDim: hipDeviceAttribute_t = - hipDeviceAttribute_t(10009); -} -impl hipDeviceAttribute_t { - #[doc = "< Supports cooperative launch on multiple\n< devices with unmatched shared memories"] - pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedSharedMem: hipDeviceAttribute_t = - hipDeviceAttribute_t(10010); -} -impl hipDeviceAttribute_t { - #[doc = "< Whether it is LargeBar"] - pub const hipDeviceAttributeIsLargeBar: hipDeviceAttribute_t = hipDeviceAttribute_t(10011); -} -impl hipDeviceAttribute_t { - #[doc = "< Revision of the GPU in this device"] - pub const hipDeviceAttributeAsicRevision: hipDeviceAttribute_t = hipDeviceAttribute_t(10012); -} -impl hipDeviceAttribute_t { - #[doc = "< '1' if Device supports hipStreamWaitValue32() and\n< hipStreamWaitValue64(), '0' otherwise."] - pub const hipDeviceAttributeCanUseStreamWaitValue: hipDeviceAttribute_t = - hipDeviceAttribute_t(10013); -} -impl hipDeviceAttribute_t { - #[doc = "< '1' if Device supports image, '0' otherwise."] - pub const hipDeviceAttributeImageSupport: hipDeviceAttribute_t = hipDeviceAttribute_t(10014); -} -impl hipDeviceAttribute_t { - #[doc = "< All available physical compute\n< units for the device"] - pub const hipDeviceAttributePhysicalMultiProcessorCount: hipDeviceAttribute_t = - hipDeviceAttribute_t(10015); -} -impl hipDeviceAttribute_t { - #[doc = "< '1' if Device supports fine grain, '0' otherwise"] - pub const hipDeviceAttributeFineGrainSupport: hipDeviceAttribute_t = - hipDeviceAttribute_t(10016); -} -impl hipDeviceAttribute_t { - #[doc = "< Constant frequency of wall clock in kilohertz."] - pub const hipDeviceAttributeWallClockRate: hipDeviceAttribute_t = hipDeviceAttribute_t(10017); -} -impl hipDeviceAttribute_t { - pub const hipDeviceAttributeAmdSpecificEnd: hipDeviceAttribute_t = hipDeviceAttribute_t(19999); -} -impl hipDeviceAttribute_t { - pub const hipDeviceAttributeVendorSpecificBegin: hipDeviceAttribute_t = - hipDeviceAttribute_t(20000); -} -#[repr(transparent)] -#[doc = " hipDeviceAttribute_t\n hipDeviceAttributeUnused number: 5"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipDeviceAttribute_t(pub ::std::os::raw::c_uint); -impl hipComputeMode { - pub const hipComputeModeDefault: hipComputeMode = hipComputeMode(0); -} -impl hipComputeMode { - pub const hipComputeModeExclusive: hipComputeMode = hipComputeMode(1); -} -impl hipComputeMode { - pub const hipComputeModeProhibited: hipComputeMode = hipComputeMode(2); -} -impl hipComputeMode { - pub const hipComputeModeExclusiveProcess: hipComputeMode = hipComputeMode(3); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipComputeMode(pub ::std::os::raw::c_uint); -impl hipFlushGPUDirectRDMAWritesOptions { - pub const hipFlushGPUDirectRDMAWritesOptionHost: hipFlushGPUDirectRDMAWritesOptions = - hipFlushGPUDirectRDMAWritesOptions(1); -} -impl hipFlushGPUDirectRDMAWritesOptions { - pub const hipFlushGPUDirectRDMAWritesOptionMemOps: hipFlushGPUDirectRDMAWritesOptions = - hipFlushGPUDirectRDMAWritesOptions(2); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipFlushGPUDirectRDMAWritesOptions(pub ::std::os::raw::c_uint); -impl hipGPUDirectRDMAWritesOrdering { - pub const hipGPUDirectRDMAWritesOrderingNone: hipGPUDirectRDMAWritesOrdering = - hipGPUDirectRDMAWritesOrdering(0); -} -impl hipGPUDirectRDMAWritesOrdering { - pub const hipGPUDirectRDMAWritesOrderingOwner: hipGPUDirectRDMAWritesOrdering = - 
hipGPUDirectRDMAWritesOrdering(100); -} -impl hipGPUDirectRDMAWritesOrdering { - pub const hipGPUDirectRDMAWritesOrderingAllDevices: hipGPUDirectRDMAWritesOrdering = - hipGPUDirectRDMAWritesOrdering(200); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGPUDirectRDMAWritesOrdering(pub ::std::os::raw::c_uint); -#[repr(transparent)] -#[derive(Copy, Clone)] -pub struct hipDeviceptr_t(pub *mut ::std::os::raw::c_void); -impl hipChannelFormatKind { - pub const hipChannelFormatKindSigned: hipChannelFormatKind = hipChannelFormatKind(0); -} -impl hipChannelFormatKind { - pub const hipChannelFormatKindUnsigned: hipChannelFormatKind = hipChannelFormatKind(1); -} -impl hipChannelFormatKind { - pub const hipChannelFormatKindFloat: hipChannelFormatKind = hipChannelFormatKind(2); -} -impl hipChannelFormatKind { - pub const hipChannelFormatKindNone: hipChannelFormatKind = hipChannelFormatKind(3); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipChannelFormatKind(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipChannelFormatDesc { - pub x: ::std::os::raw::c_int, - pub y: ::std::os::raw::c_int, - pub z: ::std::os::raw::c_int, - pub w: ::std::os::raw::c_int, - pub f: hipChannelFormatKind, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipArray { - _unused: [u8; 0], -} -pub type hipArray_t = *mut hipArray; -pub type hipArray_const_t = *const hipArray; -impl hipArray_Format { - pub const HIP_AD_FORMAT_UNSIGNED_INT8: hipArray_Format = hipArray_Format(1); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_UNSIGNED_INT16: hipArray_Format = hipArray_Format(2); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_UNSIGNED_INT32: hipArray_Format = hipArray_Format(3); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_SIGNED_INT8: hipArray_Format = hipArray_Format(8); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_SIGNED_INT16: hipArray_Format = hipArray_Format(9); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_SIGNED_INT32: hipArray_Format = hipArray_Format(10); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_HALF: hipArray_Format = hipArray_Format(16); -} -impl hipArray_Format { - pub const HIP_AD_FORMAT_FLOAT: hipArray_Format = hipArray_Format(32); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipArray_Format(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_ARRAY_DESCRIPTOR { - pub Width: usize, - pub Height: usize, - pub Format: hipArray_Format, - pub NumChannels: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_ARRAY3D_DESCRIPTOR { - pub Width: usize, - pub Height: usize, - pub Depth: usize, - pub Format: hipArray_Format, - pub NumChannels: ::std::os::raw::c_uint, - pub Flags: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hip_Memcpy2D { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcMemoryType: hipMemoryType, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: hipDeviceptr_t, - pub srcArray: hipArray_t, - pub srcPitch: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstMemoryType: hipMemoryType, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: hipDeviceptr_t, - pub dstArray: hipArray_t, - pub dstPitch: usize, - pub WidthInBytes: usize, - pub Height: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMipmappedArray { - pub data: *mut ::std::os::raw::c_void, - pub desc: hipChannelFormatDesc, - pub type_: 
::std::os::raw::c_uint, - pub width: ::std::os::raw::c_uint, - pub height: ::std::os::raw::c_uint, - pub depth: ::std::os::raw::c_uint, - pub min_mipmap_level: ::std::os::raw::c_uint, - pub max_mipmap_level: ::std::os::raw::c_uint, - pub flags: ::std::os::raw::c_uint, - pub format: hipArray_Format, - pub num_channels: ::std::os::raw::c_uint, -} -pub type hipMipmappedArray_t = *mut hipMipmappedArray; -pub type hipmipmappedArray = hipMipmappedArray_t; -pub type hipMipmappedArray_const_t = *const hipMipmappedArray; -impl hipResourceType { - pub const hipResourceTypeArray: hipResourceType = hipResourceType(0); -} -impl hipResourceType { - pub const hipResourceTypeMipmappedArray: hipResourceType = hipResourceType(1); -} -impl hipResourceType { - pub const hipResourceTypeLinear: hipResourceType = hipResourceType(2); -} -impl hipResourceType { - pub const hipResourceTypePitch2D: hipResourceType = hipResourceType(3); -} -#[repr(transparent)] -#[doc = " hip resource types"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipResourceType(pub ::std::os::raw::c_uint); -impl HIPresourcetype_enum { - #[doc = "< Array resoure"] - pub const HIP_RESOURCE_TYPE_ARRAY: HIPresourcetype_enum = HIPresourcetype_enum(0); -} -impl HIPresourcetype_enum { - #[doc = "< Mipmapped array resource"] - pub const HIP_RESOURCE_TYPE_MIPMAPPED_ARRAY: HIPresourcetype_enum = HIPresourcetype_enum(1); -} -impl HIPresourcetype_enum { - #[doc = "< Linear resource"] - pub const HIP_RESOURCE_TYPE_LINEAR: HIPresourcetype_enum = HIPresourcetype_enum(2); -} -impl HIPresourcetype_enum { - #[doc = "< Pitch 2D resource"] - pub const HIP_RESOURCE_TYPE_PITCH2D: HIPresourcetype_enum = HIPresourcetype_enum(3); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct HIPresourcetype_enum(pub ::std::os::raw::c_uint); -pub use self::HIPresourcetype_enum as HIPresourcetype; -pub use self::HIPresourcetype_enum as hipResourcetype; -impl HIPaddress_mode_enum { - pub const HIP_TR_ADDRESS_MODE_WRAP: HIPaddress_mode_enum = HIPaddress_mode_enum(0); -} -impl HIPaddress_mode_enum { - pub const HIP_TR_ADDRESS_MODE_CLAMP: HIPaddress_mode_enum = HIPaddress_mode_enum(1); -} -impl HIPaddress_mode_enum { - pub const HIP_TR_ADDRESS_MODE_MIRROR: HIPaddress_mode_enum = HIPaddress_mode_enum(2); -} -impl HIPaddress_mode_enum { - pub const HIP_TR_ADDRESS_MODE_BORDER: HIPaddress_mode_enum = HIPaddress_mode_enum(3); -} -#[repr(transparent)] -#[doc = " hip address modes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct HIPaddress_mode_enum(pub ::std::os::raw::c_uint); -#[doc = " hip address modes"] -pub use self::HIPaddress_mode_enum as HIPaddress_mode; -impl HIPfilter_mode_enum { - pub const HIP_TR_FILTER_MODE_POINT: HIPfilter_mode_enum = HIPfilter_mode_enum(0); -} -impl HIPfilter_mode_enum { - pub const HIP_TR_FILTER_MODE_LINEAR: HIPfilter_mode_enum = HIPfilter_mode_enum(1); -} -#[repr(transparent)] -#[doc = " hip filter modes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct HIPfilter_mode_enum(pub ::std::os::raw::c_uint); -#[doc = " hip filter modes"] -pub use self::HIPfilter_mode_enum as HIPfilter_mode; -#[doc = " Texture descriptor"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_TEXTURE_DESC_st { - #[doc = "< Address modes"] - pub addressMode: [HIPaddress_mode; 3usize], - #[doc = "< Filter mode"] - pub filterMode: HIPfilter_mode, - #[doc = "< Flags"] - pub flags: ::std::os::raw::c_uint, - #[doc = "< Maximum anisotropy ratio"] - pub maxAnisotropy: ::std::os::raw::c_uint, - #[doc = "< Mipmap filter mode"] - pub 
mipmapFilterMode: HIPfilter_mode, - #[doc = "< Mipmap level bias"] - pub mipmapLevelBias: f32, - #[doc = "< Mipmap minimum level clamp"] - pub minMipmapLevelClamp: f32, - #[doc = "< Mipmap maximum level clamp"] - pub maxMipmapLevelClamp: f32, - #[doc = "< Border Color"] - pub borderColor: [f32; 4usize], - pub reserved: [::std::os::raw::c_int; 12usize], -} -#[doc = " Texture descriptor"] -pub type HIP_TEXTURE_DESC = HIP_TEXTURE_DESC_st; -impl hipResourceViewFormat { - pub const hipResViewFormatNone: hipResourceViewFormat = hipResourceViewFormat(0); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedChar1: hipResourceViewFormat = hipResourceViewFormat(1); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedChar2: hipResourceViewFormat = hipResourceViewFormat(2); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedChar4: hipResourceViewFormat = hipResourceViewFormat(3); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedChar1: hipResourceViewFormat = hipResourceViewFormat(4); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedChar2: hipResourceViewFormat = hipResourceViewFormat(5); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedChar4: hipResourceViewFormat = hipResourceViewFormat(6); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedShort1: hipResourceViewFormat = hipResourceViewFormat(7); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedShort2: hipResourceViewFormat = hipResourceViewFormat(8); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedShort4: hipResourceViewFormat = hipResourceViewFormat(9); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedShort1: hipResourceViewFormat = hipResourceViewFormat(10); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedShort2: hipResourceViewFormat = hipResourceViewFormat(11); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedShort4: hipResourceViewFormat = hipResourceViewFormat(12); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedInt1: hipResourceViewFormat = hipResourceViewFormat(13); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedInt2: hipResourceViewFormat = hipResourceViewFormat(14); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedInt4: hipResourceViewFormat = hipResourceViewFormat(15); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedInt1: hipResourceViewFormat = hipResourceViewFormat(16); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedInt2: hipResourceViewFormat = hipResourceViewFormat(17); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedInt4: hipResourceViewFormat = hipResourceViewFormat(18); -} -impl hipResourceViewFormat { - pub const hipResViewFormatHalf1: hipResourceViewFormat = hipResourceViewFormat(19); -} -impl hipResourceViewFormat { - pub const hipResViewFormatHalf2: hipResourceViewFormat = hipResourceViewFormat(20); -} -impl hipResourceViewFormat { - pub const hipResViewFormatHalf4: hipResourceViewFormat = hipResourceViewFormat(21); -} -impl hipResourceViewFormat { - pub const hipResViewFormatFloat1: hipResourceViewFormat = hipResourceViewFormat(22); -} -impl hipResourceViewFormat { - pub const hipResViewFormatFloat2: hipResourceViewFormat = hipResourceViewFormat(23); -} -impl hipResourceViewFormat { - pub const hipResViewFormatFloat4: hipResourceViewFormat = hipResourceViewFormat(24); -} -impl 
hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed1: hipResourceViewFormat = - hipResourceViewFormat(25); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed2: hipResourceViewFormat = - hipResourceViewFormat(26); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed3: hipResourceViewFormat = - hipResourceViewFormat(27); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed4: hipResourceViewFormat = - hipResourceViewFormat(28); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedBlockCompressed4: hipResourceViewFormat = - hipResourceViewFormat(29); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed5: hipResourceViewFormat = - hipResourceViewFormat(30); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedBlockCompressed5: hipResourceViewFormat = - hipResourceViewFormat(31); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed6H: hipResourceViewFormat = - hipResourceViewFormat(32); -} -impl hipResourceViewFormat { - pub const hipResViewFormatSignedBlockCompressed6H: hipResourceViewFormat = - hipResourceViewFormat(33); -} -impl hipResourceViewFormat { - pub const hipResViewFormatUnsignedBlockCompressed7: hipResourceViewFormat = - hipResourceViewFormat(34); -} -#[repr(transparent)] -#[doc = " hip texture resource view formats"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipResourceViewFormat(pub ::std::os::raw::c_uint); -impl HIPresourceViewFormat_enum { - #[doc = "< No resource view format (use underlying resource format)"] - pub const HIP_RES_VIEW_FORMAT_NONE: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum(0); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel unsigned 8-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_1X8: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(1); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel unsigned 8-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_2X8: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(2); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel unsigned 8-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_4X8: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(3); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel signed 8-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_1X8: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(4); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel signed 8-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_2X8: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(5); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel signed 8-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_4X8: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(6); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel unsigned 16-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_1X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(7); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel unsigned 16-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_2X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(8); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel unsigned 16-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_4X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(9); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel 
signed 16-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_1X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(10); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel signed 16-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_2X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(11); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel signed 16-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_4X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(12); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel unsigned 32-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_1X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(13); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel unsigned 32-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_2X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(14); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel unsigned 32-bit integers"] - pub const HIP_RES_VIEW_FORMAT_UINT_4X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(15); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel signed 32-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_1X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(16); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel signed 32-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_2X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(17); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel signed 32-bit integers"] - pub const HIP_RES_VIEW_FORMAT_SINT_4X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(18); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel 16-bit floating point"] - pub const HIP_RES_VIEW_FORMAT_FLOAT_1X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(19); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel 16-bit floating point"] - pub const HIP_RES_VIEW_FORMAT_FLOAT_2X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(20); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel 16-bit floating point"] - pub const HIP_RES_VIEW_FORMAT_FLOAT_4X16: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(21); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 1 channel 32-bit floating point"] - pub const HIP_RES_VIEW_FORMAT_FLOAT_1X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(22); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 2 channel 32-bit floating point"] - pub const HIP_RES_VIEW_FORMAT_FLOAT_2X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(23); -} -impl HIPresourceViewFormat_enum { - #[doc = "< 4 channel 32-bit floating point"] - pub const HIP_RES_VIEW_FORMAT_FLOAT_4X32: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(24); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 1"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC1: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(25); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 2"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC2: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(26); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 3"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC3: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(27); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 4 unsigned"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC4: HIPresourceViewFormat_enum = - 
HIPresourceViewFormat_enum(28); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 4 signed"] - pub const HIP_RES_VIEW_FORMAT_SIGNED_BC4: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(29); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 5 unsigned"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC5: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(30); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 5 signed"] - pub const HIP_RES_VIEW_FORMAT_SIGNED_BC5: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(31); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 6 unsigned half-float"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC6H: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(32); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 6 signed half-float"] - pub const HIP_RES_VIEW_FORMAT_SIGNED_BC6H: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(33); -} -impl HIPresourceViewFormat_enum { - #[doc = "< Block compressed 7"] - pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC7: HIPresourceViewFormat_enum = - HIPresourceViewFormat_enum(34); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct HIPresourceViewFormat_enum(pub ::std::os::raw::c_uint); -pub use self::HIPresourceViewFormat_enum as HIPresourceViewFormat; -#[doc = " HIP resource descriptor"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipResourceDesc { - pub resType: hipResourceType, - pub res: hipResourceDesc__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipResourceDesc__bindgen_ty_1 { - pub array: hipResourceDesc__bindgen_ty_1__bindgen_ty_1, - pub mipmap: hipResourceDesc__bindgen_ty_1__bindgen_ty_2, - pub linear: hipResourceDesc__bindgen_ty_1__bindgen_ty_3, - pub pitch2D: hipResourceDesc__bindgen_ty_1__bindgen_ty_4, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_1 { - pub array: hipArray_t, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_2 { - pub mipmap: hipMipmappedArray_t, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_3 { - pub devPtr: *mut ::std::os::raw::c_void, - pub desc: hipChannelFormatDesc, - pub sizeInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_4 { - pub devPtr: *mut ::std::os::raw::c_void, - pub desc: hipChannelFormatDesc, - pub width: usize, - pub height: usize, - pub pitchInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_DESC_st { - #[doc = "< Resource type"] - pub resType: HIPresourcetype, - pub res: HIP_RESOURCE_DESC_st__bindgen_ty_1, - #[doc = "< Flags (must be zero)"] - pub flags: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union HIP_RESOURCE_DESC_st__bindgen_ty_1 { - pub array: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub mipmap: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2, - pub linear: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3, - pub pitch2D: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4, - pub reserved: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - #[doc = "< HIP array"] - pub hArray: hipArray_t, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { - #[doc = "< HIP mipmapped array"] - pub hMipmappedArray: 
hipMipmappedArray_t, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { - #[doc = "< Device pointer"] - pub devPtr: hipDeviceptr_t, - #[doc = "< Array format"] - pub format: hipArray_Format, - #[doc = "< Channels per array element"] - pub numChannels: ::std::os::raw::c_uint, - #[doc = "< Size in bytes"] - pub sizeInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { - #[doc = "< Device pointer"] - pub devPtr: hipDeviceptr_t, - #[doc = "< Array format"] - pub format: hipArray_Format, - #[doc = "< Channels per array element"] - pub numChannels: ::std::os::raw::c_uint, - #[doc = "< Width of the array in elements"] - pub width: usize, - #[doc = "< Height of the array in elements"] - pub height: usize, - #[doc = "< Pitch between two rows in bytes"] - pub pitchInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 { - pub reserved: [::std::os::raw::c_int; 32usize], -} -pub type HIP_RESOURCE_DESC = HIP_RESOURCE_DESC_st; -#[doc = " hip resource view descriptor"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipResourceViewDesc { - pub format: hipResourceViewFormat, - pub width: usize, - pub height: usize, - pub depth: usize, - pub firstMipmapLevel: ::std::os::raw::c_uint, - pub lastMipmapLevel: ::std::os::raw::c_uint, - pub firstLayer: ::std::os::raw::c_uint, - pub lastLayer: ::std::os::raw::c_uint, -} -#[doc = " Resource view descriptor"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_RESOURCE_VIEW_DESC_st { - #[doc = "< Resource view format"] - pub format: HIPresourceViewFormat, - #[doc = "< Width of the resource view"] - pub width: usize, - #[doc = "< Height of the resource view"] - pub height: usize, - #[doc = "< Depth of the resource view"] - pub depth: usize, - #[doc = "< First defined mipmap level"] - pub firstMipmapLevel: ::std::os::raw::c_uint, - #[doc = "< Last defined mipmap level"] - pub lastMipmapLevel: ::std::os::raw::c_uint, - #[doc = "< First layer index"] - pub firstLayer: ::std::os::raw::c_uint, - #[doc = "< Last layer index"] - pub lastLayer: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[doc = " Resource view descriptor"] -pub type HIP_RESOURCE_VIEW_DESC = HIP_RESOURCE_VIEW_DESC_st; -impl hipMemcpyKind { - #[doc = "< Host-to-Host Copy"] - pub const hipMemcpyHostToHost: hipMemcpyKind = hipMemcpyKind(0); -} -impl hipMemcpyKind { - #[doc = "< Host-to-Device Copy"] - pub const hipMemcpyHostToDevice: hipMemcpyKind = hipMemcpyKind(1); -} -impl hipMemcpyKind { - #[doc = "< Device-to-Host Copy"] - pub const hipMemcpyDeviceToHost: hipMemcpyKind = hipMemcpyKind(2); -} -impl hipMemcpyKind { - #[doc = "< Device-to-Device Copy"] - pub const hipMemcpyDeviceToDevice: hipMemcpyKind = hipMemcpyKind(3); -} -impl hipMemcpyKind { - #[doc = "< Runtime will automatically determine\n hipChannelFormatDesc; -} -#[doc = " An opaque value that represents a hip texture object"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct __hip_texture { - _unused: [u8; 0], -} -pub type hipTextureObject_t = *mut __hip_texture; -impl hipTextureAddressMode { - pub const hipAddressModeWrap: hipTextureAddressMode = hipTextureAddressMode(0); -} -impl hipTextureAddressMode { - pub const hipAddressModeClamp: hipTextureAddressMode = hipTextureAddressMode(1); -} -impl hipTextureAddressMode { - pub const hipAddressModeMirror: hipTextureAddressMode = hipTextureAddressMode(2); -} -impl hipTextureAddressMode { - pub 
const hipAddressModeBorder: hipTextureAddressMode = hipTextureAddressMode(3); -} -#[repr(transparent)] -#[doc = " hip texture address modes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipTextureAddressMode(pub ::std::os::raw::c_uint); -impl hipTextureFilterMode { - pub const hipFilterModePoint: hipTextureFilterMode = hipTextureFilterMode(0); -} -impl hipTextureFilterMode { - pub const hipFilterModeLinear: hipTextureFilterMode = hipTextureFilterMode(1); -} -#[repr(transparent)] -#[doc = " hip texture filter modes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipTextureFilterMode(pub ::std::os::raw::c_uint); -impl hipTextureReadMode { - pub const hipReadModeElementType: hipTextureReadMode = hipTextureReadMode(0); -} -impl hipTextureReadMode { - pub const hipReadModeNormalizedFloat: hipTextureReadMode = hipTextureReadMode(1); -} -#[repr(transparent)] -#[doc = " hip texture read modes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipTextureReadMode(pub ::std::os::raw::c_uint); -#[doc = " hip texture reference"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct textureReference { - pub normalized: ::std::os::raw::c_int, - pub readMode: hipTextureReadMode, - pub filterMode: hipTextureFilterMode, - pub addressMode: [hipTextureAddressMode; 3usize], - pub channelDesc: hipChannelFormatDesc, - pub sRGB: ::std::os::raw::c_int, - pub maxAnisotropy: ::std::os::raw::c_uint, - pub mipmapFilterMode: hipTextureFilterMode, - pub mipmapLevelBias: f32, - pub minMipmapLevelClamp: f32, - pub maxMipmapLevelClamp: f32, - pub textureObject: hipTextureObject_t, - pub numChannels: ::std::os::raw::c_int, - pub format: hipArray_Format, -} -#[doc = " hip texture descriptor"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipTextureDesc { - pub addressMode: [hipTextureAddressMode; 3usize], - pub filterMode: hipTextureFilterMode, - pub readMode: hipTextureReadMode, - pub sRGB: ::std::os::raw::c_int, - pub borderColor: [f32; 4usize], - pub normalizedCoords: ::std::os::raw::c_int, - pub maxAnisotropy: ::std::os::raw::c_uint, - pub mipmapFilterMode: hipTextureFilterMode, - pub mipmapLevelBias: f32, - pub minMipmapLevelClamp: f32, - pub maxMipmapLevelClamp: f32, -} -#[doc = " An opaque value that represents a hip surface object"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct __hip_surface { - _unused: [u8; 0], -} -pub type hipSurfaceObject_t = *mut __hip_surface; -impl hipSurfaceBoundaryMode { - pub const hipBoundaryModeZero: hipSurfaceBoundaryMode = hipSurfaceBoundaryMode(0); -} -impl hipSurfaceBoundaryMode { - pub const hipBoundaryModeTrap: hipSurfaceBoundaryMode = hipSurfaceBoundaryMode(1); -} -impl hipSurfaceBoundaryMode { - pub const hipBoundaryModeClamp: hipSurfaceBoundaryMode = hipSurfaceBoundaryMode(2); -} -#[repr(transparent)] -#[doc = " hip surface boundary modes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipSurfaceBoundaryMode(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipCtx_t { - _unused: [u8; 0], -} -pub type hipCtx_t = *mut ihipCtx_t; -pub type hipDevice_t = ::std::os::raw::c_int; -impl hipDeviceP2PAttr { - pub const hipDevP2PAttrPerformanceRank: hipDeviceP2PAttr = hipDeviceP2PAttr(0); -} -impl hipDeviceP2PAttr { - pub const hipDevP2PAttrAccessSupported: hipDeviceP2PAttr = hipDeviceP2PAttr(1); -} -impl hipDeviceP2PAttr { - pub const hipDevP2PAttrNativeAtomicSupported: hipDeviceP2PAttr = hipDeviceP2PAttr(2); -} -impl hipDeviceP2PAttr { - pub const hipDevP2PAttrHipArrayAccessSupported: hipDeviceP2PAttr = 
hipDeviceP2PAttr(3); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipDeviceP2PAttr(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipStream_t { - _unused: [u8; 0], -} -pub type hipStream_t = *mut ihipStream_t; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipIpcMemHandle_st { - pub reserved: [::std::os::raw::c_char; 64usize], -} -pub type hipIpcMemHandle_t = hipIpcMemHandle_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipIpcEventHandle_st { - pub reserved: [::std::os::raw::c_char; 64usize], -} -pub type hipIpcEventHandle_t = hipIpcEventHandle_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipModule_t { - _unused: [u8; 0], -} -pub type hipModule_t = *mut ihipModule_t; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipModuleSymbol_t { - _unused: [u8; 0], -} -pub type hipFunction_t = *mut ihipModuleSymbol_t; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipMemPoolHandle_t { - _unused: [u8; 0], -} -#[doc = " HIP memory pool"] -pub type hipMemPool_t = *mut ihipMemPoolHandle_t; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipFuncAttributes { - pub binaryVersion: ::std::os::raw::c_int, - pub cacheModeCA: ::std::os::raw::c_int, - pub constSizeBytes: usize, - pub localSizeBytes: usize, - pub maxDynamicSharedSizeBytes: ::std::os::raw::c_int, - pub maxThreadsPerBlock: ::std::os::raw::c_int, - pub numRegs: ::std::os::raw::c_int, - pub preferredShmemCarveout: ::std::os::raw::c_int, - pub ptxVersion: ::std::os::raw::c_int, - pub sharedSizeBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipEvent_t { - _unused: [u8; 0], -} -pub type hipEvent_t = *mut ihipEvent_t; -impl hipLimit_t { - #[doc = "< Limit of stack size in bytes on the current device, per\n< thread. The size is in units of 256 dwords, up to the\n< limit of (128K - 16)"] - pub const hipLimitStackSize: hipLimit_t = hipLimit_t(0); -} -impl hipLimit_t { - #[doc = "< Size limit in bytes of fifo used by printf call on the\n< device. 
Currently not supported"] - pub const hipLimitPrintfFifoSize: hipLimit_t = hipLimit_t(1); -} -impl hipLimit_t { - #[doc = "< Limit of heap size in bytes on the current device, should\n< be less than the global memory size on the device"] - pub const hipLimitMallocHeapSize: hipLimit_t = hipLimit_t(2); -} -impl hipLimit_t { - #[doc = "< Supported limit range"] - pub const hipLimitRange: hipLimit_t = hipLimit_t(3); -} -#[repr(transparent)] -#[doc = " hipLimit\n\n @note In HIP device limit-related APIs, any input limit value other than those defined in the\n enum is treated as \"UnsupportedLimit\" by default."] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipLimit_t(pub ::std::os::raw::c_uint); -impl hipMemoryAdvise { - #[doc = "< Data will mostly be read and only occassionally\n< be written to"] - pub const hipMemAdviseSetReadMostly: hipMemoryAdvise = hipMemoryAdvise(1); -} -impl hipMemoryAdvise { - #[doc = "< Undo the effect of hipMemAdviseSetReadMostly"] - pub const hipMemAdviseUnsetReadMostly: hipMemoryAdvise = hipMemoryAdvise(2); -} -impl hipMemoryAdvise { - #[doc = "< Set the preferred location for the data as\n< the specified device"] - pub const hipMemAdviseSetPreferredLocation: hipMemoryAdvise = hipMemoryAdvise(3); -} -impl hipMemoryAdvise { - #[doc = "< Clear the preferred location for the data"] - pub const hipMemAdviseUnsetPreferredLocation: hipMemoryAdvise = hipMemoryAdvise(4); -} -impl hipMemoryAdvise { - #[doc = "< Data will be accessed by the specified device\n< so prevent page faults as much as possible"] - pub const hipMemAdviseSetAccessedBy: hipMemoryAdvise = hipMemoryAdvise(5); -} -impl hipMemoryAdvise { - #[doc = "< Let HIP to decide on the page faulting policy\n< for the specified device"] - pub const hipMemAdviseUnsetAccessedBy: hipMemoryAdvise = hipMemoryAdvise(6); -} -impl hipMemoryAdvise { - #[doc = "< The default memory model is fine-grain. That allows\n< coherent operations between host and device, while\n< executing kernels. 
The coarse-grain can be used\n< for data that only needs to be coherent at dispatch\n< boundaries for better performance"] - pub const hipMemAdviseSetCoarseGrain: hipMemoryAdvise = hipMemoryAdvise(100); -} -impl hipMemoryAdvise { - #[doc = "< Restores cache coherency policy back to fine-grain"] - pub const hipMemAdviseUnsetCoarseGrain: hipMemoryAdvise = hipMemoryAdvise(101); -} -#[repr(transparent)] -#[doc = " HIP Memory Advise values\n\n @note This memory advise enumeration is used on Linux, not Windows."] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemoryAdvise(pub ::std::os::raw::c_uint); -impl hipMemRangeCoherencyMode { - #[doc = "< Updates to memory with this attribute can be\n< done coherently from all devices"] - pub const hipMemRangeCoherencyModeFineGrain: hipMemRangeCoherencyMode = - hipMemRangeCoherencyMode(0); -} -impl hipMemRangeCoherencyMode { - #[doc = "< Writes to memory with this attribute can be\n< performed by a single device at a time"] - pub const hipMemRangeCoherencyModeCoarseGrain: hipMemRangeCoherencyMode = - hipMemRangeCoherencyMode(1); -} -impl hipMemRangeCoherencyMode { - #[doc = "< Memory region queried contains subregions with\n< both hipMemRangeCoherencyModeFineGrain and\n< hipMemRangeCoherencyModeCoarseGrain attributes"] - pub const hipMemRangeCoherencyModeIndeterminate: hipMemRangeCoherencyMode = - hipMemRangeCoherencyMode(2); -} -#[repr(transparent)] -#[doc = " HIP Coherency Mode"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemRangeCoherencyMode(pub ::std::os::raw::c_uint); -impl hipMemRangeAttribute { - #[doc = "< Whether the range will mostly be read and\n< only occassionally be written to"] - pub const hipMemRangeAttributeReadMostly: hipMemRangeAttribute = hipMemRangeAttribute(1); -} -impl hipMemRangeAttribute { - #[doc = "< The preferred location of the range"] - pub const hipMemRangeAttributePreferredLocation: hipMemRangeAttribute = hipMemRangeAttribute(2); -} -impl hipMemRangeAttribute { - #[doc = "< Memory range has hipMemAdviseSetAccessedBy\n< set for the specified device"] - pub const hipMemRangeAttributeAccessedBy: hipMemRangeAttribute = hipMemRangeAttribute(3); -} -impl hipMemRangeAttribute { - #[doc = "< The last location to where the range was\n< prefetched"] - pub const hipMemRangeAttributeLastPrefetchLocation: hipMemRangeAttribute = - hipMemRangeAttribute(4); -} -impl hipMemRangeAttribute { - #[doc = "< Returns coherency mode\n< @ref hipMemRangeCoherencyMode for the range"] - pub const hipMemRangeAttributeCoherencyMode: hipMemRangeAttribute = hipMemRangeAttribute(100); -} -#[repr(transparent)] -#[doc = " HIP range attributes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemRangeAttribute(pub ::std::os::raw::c_uint); -impl hipMemPoolAttr { - #[doc = " (value type = int)\n Allow @p hipMemAllocAsync to use memory asynchronously freed\n in another streams as long as a stream ordering dependency\n of the allocating stream on the free action exists.\n hip events and null stream interactions can create the required\n stream ordered dependencies. (default enabled)"] - pub const hipMemPoolReuseFollowEventDependencies: hipMemPoolAttr = hipMemPoolAttr(1); -} -impl hipMemPoolAttr { - #[doc = " (value type = int)\n Allow reuse of already completed frees when there is no dependency\n between the free and allocation. 
(default enabled)"] - pub const hipMemPoolReuseAllowOpportunistic: hipMemPoolAttr = hipMemPoolAttr(2); -} -impl hipMemPoolAttr { - #[doc = " (value type = int)\n Allow @p hipMemAllocAsync to insert new stream dependencies\n in order to establish the stream ordering required to reuse\n a piece of memory released by cuFreeAsync (default enabled)."] - pub const hipMemPoolReuseAllowInternalDependencies: hipMemPoolAttr = hipMemPoolAttr(3); -} -impl hipMemPoolAttr { - #[doc = " (value type = uint64_t)\n Amount of reserved memory in bytes to hold onto before trying\n to release memory back to the OS. When more than the release\n threshold bytes of memory are held by the memory pool, the\n allocator will try to release memory back to the OS on the\n next call to stream, event or context synchronize. (default 0)"] - pub const hipMemPoolAttrReleaseThreshold: hipMemPoolAttr = hipMemPoolAttr(4); -} -impl hipMemPoolAttr { - #[doc = " (value type = uint64_t)\n Amount of backing memory currently allocated for the mempool."] - pub const hipMemPoolAttrReservedMemCurrent: hipMemPoolAttr = hipMemPoolAttr(5); -} -impl hipMemPoolAttr { - #[doc = " (value type = uint64_t)\n High watermark of backing memory allocated for the mempool since the\n last time it was reset. High watermark can only be reset to zero."] - pub const hipMemPoolAttrReservedMemHigh: hipMemPoolAttr = hipMemPoolAttr(6); -} -impl hipMemPoolAttr { - #[doc = " (value type = uint64_t)\n Amount of memory from the pool that is currently in use by the application."] - pub const hipMemPoolAttrUsedMemCurrent: hipMemPoolAttr = hipMemPoolAttr(7); -} -impl hipMemPoolAttr { - #[doc = " (value type = uint64_t)\n High watermark of the amount of memory from the pool that was in use by the application since\n the last time it was reset. 
High watermark can only be reset to zero."] - pub const hipMemPoolAttrUsedMemHigh: hipMemPoolAttr = hipMemPoolAttr(8); -} -#[repr(transparent)] -#[doc = " HIP memory pool attributes"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemPoolAttr(pub ::std::os::raw::c_uint); -impl hipMemLocationType { - pub const hipMemLocationTypeInvalid: hipMemLocationType = hipMemLocationType(0); -} -impl hipMemLocationType { - #[doc = "< Device location, thus it's HIP device ID"] - pub const hipMemLocationTypeDevice: hipMemLocationType = hipMemLocationType(1); -} -#[repr(transparent)] -#[doc = " Specifies the type of location"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemLocationType(pub ::std::os::raw::c_uint); -#[doc = " Specifies a memory location.\n\n To specify a gpu, set type = @p hipMemLocationTypeDevice and set id = the gpu's device ID"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemLocation { - #[doc = "< Specifies the location type, which describes the meaning of id"] - pub type_: hipMemLocationType, - #[doc = "< Identifier for the provided location type @p hipMemLocationType"] - pub id: ::std::os::raw::c_int, -} -impl hipMemAccessFlags { - #[doc = "< Default, make the address range not accessible"] - pub const hipMemAccessFlagsProtNone: hipMemAccessFlags = hipMemAccessFlags(0); -} -impl hipMemAccessFlags { - #[doc = "< Set the address range read accessible"] - pub const hipMemAccessFlagsProtRead: hipMemAccessFlags = hipMemAccessFlags(1); -} -impl hipMemAccessFlags { - #[doc = "< Set the address range read-write accessible"] - pub const hipMemAccessFlagsProtReadWrite: hipMemAccessFlags = hipMemAccessFlags(3); -} -#[repr(transparent)] -#[doc = " Specifies the memory protection flags for mapping\n"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemAccessFlags(pub ::std::os::raw::c_uint); -#[doc = " Memory access descriptor"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemAccessDesc { - #[doc = "< Location on which the accessibility has to change"] - pub location: hipMemLocation, - #[doc = "< Accessibility flags to set"] - pub flags: hipMemAccessFlags, -} -impl hipMemAllocationType { - pub const hipMemAllocationTypeInvalid: hipMemAllocationType = hipMemAllocationType(0); -} -impl hipMemAllocationType { - #[doc = " This allocation type is 'pinned', i.e. cannot migrate from its current\n location while the application is actively using it"] - pub const hipMemAllocationTypePinned: hipMemAllocationType = hipMemAllocationType(1); -} -impl hipMemAllocationType { - #[doc = " This allocation type is 'pinned', i.e. cannot migrate from its current\n location while the application is actively using it"] - pub const hipMemAllocationTypeMax: hipMemAllocationType = hipMemAllocationType(2147483647); -} -#[repr(transparent)] -#[doc = " Defines the allocation types"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemAllocationType(pub ::std::os::raw::c_uint); -impl hipMemAllocationHandleType { - #[doc = "< Does not allow any export mechanism"] - pub const hipMemHandleTypeNone: hipMemAllocationHandleType = hipMemAllocationHandleType(0); -} -impl hipMemAllocationHandleType { - #[doc = "< Allows a file descriptor for exporting. Permitted only on POSIX systems"] - pub const hipMemHandleTypePosixFileDescriptor: hipMemAllocationHandleType = - hipMemAllocationHandleType(1); -} -impl hipMemAllocationHandleType { - #[doc = "< Allows a Win32 NT handle for exporting. 
(HANDLE)"] - pub const hipMemHandleTypeWin32: hipMemAllocationHandleType = hipMemAllocationHandleType(2); -} -impl hipMemAllocationHandleType { - #[doc = "< Allows a Win32 KMT handle for exporting. (D3DKMT_HANDLE)"] - pub const hipMemHandleTypeWin32Kmt: hipMemAllocationHandleType = hipMemAllocationHandleType(4); -} -#[repr(transparent)] -#[doc = " Flags for specifying handle types for memory pool allocations\n"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemAllocationHandleType(pub ::std::os::raw::c_uint); -#[doc = " Specifies the properties of allocations made from the pool."] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemPoolProps { - #[doc = "< Allocation type. Currently must be specified as @p hipMemAllocationTypePinned"] - pub allocType: hipMemAllocationType, - #[doc = "< Handle types that will be supported by allocations from the pool"] - pub handleTypes: hipMemAllocationHandleType, - #[doc = "< Location where allocations should reside"] - pub location: hipMemLocation, - #[doc = " Windows-specific LPSECURITYATTRIBUTES required when @p hipMemHandleTypeWin32 is specified"] - pub win32SecurityAttributes: *mut ::std::os::raw::c_void, - #[doc = "< Reserved for future use, must be 0"] - pub reserved: [::std::os::raw::c_uchar; 64usize], -} -#[doc = " Opaque data structure for exporting a pool allocation"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemPoolPtrExportData { - pub reserved: [::std::os::raw::c_uchar; 64usize], -} -impl hipJitOption { - pub const hipJitOptionMaxRegisters: hipJitOption = hipJitOption(0); -} -impl hipJitOption { - pub const hipJitOptionThreadsPerBlock: hipJitOption = hipJitOption(1); -} -impl hipJitOption { - pub const hipJitOptionWallTime: hipJitOption = hipJitOption(2); -} -impl hipJitOption { - pub const hipJitOptionInfoLogBuffer: hipJitOption = hipJitOption(3); -} -impl hipJitOption { - pub const hipJitOptionInfoLogBufferSizeBytes: hipJitOption = hipJitOption(4); -} -impl hipJitOption { - pub const hipJitOptionErrorLogBuffer: hipJitOption = hipJitOption(5); -} -impl hipJitOption { - pub const hipJitOptionErrorLogBufferSizeBytes: hipJitOption = hipJitOption(6); -} -impl hipJitOption { - pub const hipJitOptionOptimizationLevel: hipJitOption = hipJitOption(7); -} -impl hipJitOption { - pub const hipJitOptionTargetFromContext: hipJitOption = hipJitOption(8); -} -impl hipJitOption { - pub const hipJitOptionTarget: hipJitOption = hipJitOption(9); -} -impl hipJitOption { - pub const hipJitOptionFallbackStrategy: hipJitOption = hipJitOption(10); -} -impl hipJitOption { - pub const hipJitOptionGenerateDebugInfo: hipJitOption = hipJitOption(11); -} -impl hipJitOption { - pub const hipJitOptionLogVerbose: hipJitOption = hipJitOption(12); -} -impl hipJitOption { - pub const hipJitOptionGenerateLineInfo: hipJitOption = hipJitOption(13); -} -impl hipJitOption { - pub const hipJitOptionCacheMode: hipJitOption = hipJitOption(14); -} -impl hipJitOption { - pub const hipJitOptionSm3xOpt: hipJitOption = hipJitOption(15); -} -impl hipJitOption { - pub const hipJitOptionFastCompile: hipJitOption = hipJitOption(16); -} -impl hipJitOption { - pub const hipJitOptionNumOptions: hipJitOption = hipJitOption(17); -} -#[repr(transparent)] -#[doc = " hipJitOption"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipJitOption(pub ::std::os::raw::c_uint); -impl hipFuncAttribute { - pub const hipFuncAttributeMaxDynamicSharedMemorySize: hipFuncAttribute = hipFuncAttribute(8); -} -impl hipFuncAttribute { - pub const 
hipFuncAttributePreferredSharedMemoryCarveout: hipFuncAttribute = hipFuncAttribute(9); -} -impl hipFuncAttribute { - pub const hipFuncAttributeMax: hipFuncAttribute = hipFuncAttribute(10); -} -#[repr(transparent)] -#[doc = " @warning On AMD devices and some Nvidia devices, these hints and controls are ignored."] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipFuncAttribute(pub ::std::os::raw::c_uint); -impl hipFuncCache_t { - #[doc = "< no preference for shared memory or L1 (default)"] - pub const hipFuncCachePreferNone: hipFuncCache_t = hipFuncCache_t(0); -} -impl hipFuncCache_t { - #[doc = "< prefer larger shared memory and smaller L1 cache"] - pub const hipFuncCachePreferShared: hipFuncCache_t = hipFuncCache_t(1); -} -impl hipFuncCache_t { - #[doc = "< prefer larger L1 cache and smaller shared memory"] - pub const hipFuncCachePreferL1: hipFuncCache_t = hipFuncCache_t(2); -} -impl hipFuncCache_t { - #[doc = "< prefer equal size L1 cache and shared memory"] - pub const hipFuncCachePreferEqual: hipFuncCache_t = hipFuncCache_t(3); -} -#[repr(transparent)] -#[doc = " @warning On AMD devices and some Nvidia devices, these hints and controls are ignored."] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipFuncCache_t(pub ::std::os::raw::c_uint); -impl hipSharedMemConfig { - #[doc = "< The compiler selects a device-specific value for the banking."] - pub const hipSharedMemBankSizeDefault: hipSharedMemConfig = hipSharedMemConfig(0); -} -impl hipSharedMemConfig { - #[doc = "< Shared mem is banked at 4-bytes intervals and performs best\n< when adjacent threads access data 4 bytes apart."] - pub const hipSharedMemBankSizeFourByte: hipSharedMemConfig = hipSharedMemConfig(1); -} -impl hipSharedMemConfig { - #[doc = "< Shared mem is banked at 8-byte intervals and performs best\n< when adjacent threads access data 4 bytes apart."] - pub const hipSharedMemBankSizeEightByte: hipSharedMemConfig = hipSharedMemConfig(2); -} -#[repr(transparent)] -#[doc = " @warning On AMD devices and some Nvidia devices, these hints and controls are ignored."] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipSharedMemConfig(pub ::std::os::raw::c_uint); -#[doc = " Struct for data in 3D"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct dim3 { - #[doc = "< x"] - pub x: u32, - #[doc = "< y"] - pub y: u32, - #[doc = "< z"] - pub z: u32, -} -#[doc = " struct hipLaunchParams_t"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipLaunchParams_t { - #[doc = "< Device function symbol"] - pub func: *mut ::std::os::raw::c_void, - #[doc = "< Grid dimentions"] - pub gridDim: dim3, - #[doc = "< Block dimentions"] - pub blockDim: dim3, - #[doc = "< Arguments"] - pub args: *mut *mut ::std::os::raw::c_void, - #[doc = "< Shared memory"] - pub sharedMem: usize, - #[doc = "< Stream identifier"] - pub stream: hipStream_t, -} -#[doc = " struct hipLaunchParams_t"] -pub type hipLaunchParams = hipLaunchParams_t; -#[doc = " struct hipFunctionLaunchParams_t"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipFunctionLaunchParams_t { - #[doc = "< Kernel to launch"] - pub function: hipFunction_t, - #[doc = "< Width(X) of grid in blocks"] - pub gridDimX: ::std::os::raw::c_uint, - #[doc = "< Height(Y) of grid in blocks"] - pub gridDimY: ::std::os::raw::c_uint, - #[doc = "< Depth(Z) of grid in blocks"] - pub gridDimZ: ::std::os::raw::c_uint, - #[doc = "< X dimension of each thread block"] - pub blockDimX: ::std::os::raw::c_uint, - #[doc = "< Y dimension of each thread block"] - pub blockDimY: ::std::os::raw::c_uint, - 
#[doc = "< Z dimension of each thread block"] - pub blockDimZ: ::std::os::raw::c_uint, - #[doc = "< Shared memory"] - pub sharedMemBytes: ::std::os::raw::c_uint, - #[doc = "< Stream identifier"] - pub hStream: hipStream_t, - #[doc = "< Kernel parameters"] - pub kernelParams: *mut *mut ::std::os::raw::c_void, -} -#[doc = " struct hipFunctionLaunchParams_t"] -pub type hipFunctionLaunchParams = hipFunctionLaunchParams_t; -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeOpaqueFd: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(1); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeOpaqueWin32: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(2); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeOpaqueWin32Kmt: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(3); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeD3D12Heap: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(4); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeD3D12Resource: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(5); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeD3D11Resource: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(6); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeD3D11ResourceKmt: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(7); -} -impl hipExternalMemoryHandleType_enum { - pub const hipExternalMemoryHandleTypeNvSciBuf: hipExternalMemoryHandleType_enum = - hipExternalMemoryHandleType_enum(8); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipExternalMemoryHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::hipExternalMemoryHandleType_enum as hipExternalMemoryHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalMemoryHandleDesc_st { - pub type_: hipExternalMemoryHandleType, - pub handle: hipExternalMemoryHandleDesc_st__bindgen_ty_1, - pub size: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipExternalMemoryHandleDesc_st__bindgen_ty_1 { - pub fd: ::std::os::raw::c_int, - pub win32: hipExternalMemoryHandleDesc_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciBufObject: *const ::std::os::raw::c_void, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalMemoryHandleDesc_st__bindgen_ty_1__bindgen_ty_1 { - pub handle: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_void, -} -pub type hipExternalMemoryHandleDesc = hipExternalMemoryHandleDesc_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalMemoryBufferDesc_st { - pub offset: ::std::os::raw::c_ulonglong, - pub size: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type hipExternalMemoryBufferDesc = hipExternalMemoryBufferDesc_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalMemoryMipmappedArrayDesc_st { - pub offset: ::std::os::raw::c_ulonglong, - pub formatDesc: hipChannelFormatDesc, - pub extent: hipExtent, - pub flags: ::std::os::raw::c_uint, - pub numLevels: ::std::os::raw::c_uint, -} -pub type hipExternalMemoryMipmappedArrayDesc = hipExternalMemoryMipmappedArrayDesc_st; -pub type 
hipExternalMemory_t = *mut ::std::os::raw::c_void; -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeOpaqueFd: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(1); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeOpaqueWin32: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(2); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeOpaqueWin32Kmt: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(3); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeD3D12Fence: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(4); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeD3D11Fence: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(5); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeNvSciSync: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(6); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeKeyedMutex: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(7); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeKeyedMutexKmt: hipExternalSemaphoreHandleType_enum = - hipExternalSemaphoreHandleType_enum(8); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeTimelineSemaphoreFd: - hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum(9); -} -impl hipExternalSemaphoreHandleType_enum { - pub const hipExternalSemaphoreHandleTypeTimelineSemaphoreWin32: - hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum(10); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipExternalSemaphoreHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::hipExternalSemaphoreHandleType_enum as hipExternalSemaphoreHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreHandleDesc_st { - pub type_: hipExternalSemaphoreHandleType, - pub handle: hipExternalSemaphoreHandleDesc_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipExternalSemaphoreHandleDesc_st__bindgen_ty_1 { - pub fd: ::std::os::raw::c_int, - pub win32: hipExternalSemaphoreHandleDesc_st__bindgen_ty_1__bindgen_ty_1, - pub NvSciSyncObj: *const ::std::os::raw::c_void, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreHandleDesc_st__bindgen_ty_1__bindgen_ty_1 { - pub handle: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_void, -} -pub type hipExternalSemaphoreHandleDesc = hipExternalSemaphoreHandleDesc_st; -pub type hipExternalSemaphore_t = *mut ::std::os::raw::c_void; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreSignalParams_st { - pub params: hipExternalSemaphoreSignalParams_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreSignalParams_st__bindgen_ty_1 { - pub fence: hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSync: hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_2, - pub keyedMutex: 
hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_3, - pub reserved: [::std::os::raw::c_uint; 12usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_1 { - pub value: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_2 { - pub fence: *mut ::std::os::raw::c_void, - pub reserved: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_3 { - pub key: ::std::os::raw::c_ulonglong, -} -pub type hipExternalSemaphoreSignalParams = hipExternalSemaphoreSignalParams_st; -#[doc = " External semaphore wait parameters, compatible with driver type"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreWaitParams_st { - pub params: hipExternalSemaphoreWaitParams_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreWaitParams_st__bindgen_ty_1 { - pub fence: hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSync: hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_2, - pub keyedMutex: hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_3, - pub reserved: [::std::os::raw::c_uint; 10usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_1 { - pub value: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_2 { - pub fence: *mut ::std::os::raw::c_void, - pub reserved: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_3 { - pub key: ::std::os::raw::c_ulonglong, - pub timeoutMs: ::std::os::raw::c_uint, -} -#[doc = " External semaphore wait parameters, compatible with driver type"] -pub type hipExternalSemaphoreWaitParams = hipExternalSemaphoreWaitParams_st; -impl hipGraphicsRegisterFlags { - pub const hipGraphicsRegisterFlagsNone: hipGraphicsRegisterFlags = hipGraphicsRegisterFlags(0); -} -impl hipGraphicsRegisterFlags { - #[doc = "< HIP will not write to this registered resource"] - pub const hipGraphicsRegisterFlagsReadOnly: hipGraphicsRegisterFlags = - hipGraphicsRegisterFlags(1); -} -impl hipGraphicsRegisterFlags { - pub const hipGraphicsRegisterFlagsWriteDiscard: hipGraphicsRegisterFlags = - hipGraphicsRegisterFlags(2); -} -impl hipGraphicsRegisterFlags { - #[doc = "< HIP will bind this resource to a surface"] - pub const hipGraphicsRegisterFlagsSurfaceLoadStore: hipGraphicsRegisterFlags = - hipGraphicsRegisterFlags(4); -} -impl hipGraphicsRegisterFlags { - pub const hipGraphicsRegisterFlagsTextureGather: hipGraphicsRegisterFlags = - hipGraphicsRegisterFlags(8); -} -#[repr(transparent)] -#[doc = " HIP Access falgs for Interop resources."] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGraphicsRegisterFlags(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct _hipGraphicsResource { - _unused: [u8; 0], -} -pub type hipGraphicsResource = _hipGraphicsResource; -pub type hipGraphicsResource_t = *mut hipGraphicsResource; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipGraph { - _unused: [u8; 0], -} -#[doc = " An opaque value that represents a hip graph"] -pub type hipGraph_t = *mut ihipGraph; -#[repr(C)] 
-#[derive(Copy, Clone)] -pub struct hipGraphNode { - _unused: [u8; 0], -} -#[doc = " An opaque value that represents a hip graph node"] -pub type hipGraphNode_t = *mut hipGraphNode; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipGraphExec { - _unused: [u8; 0], -} -#[doc = " An opaque value that represents a hip graph Exec"] -pub type hipGraphExec_t = *mut hipGraphExec; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipUserObject { - _unused: [u8; 0], -} -#[doc = " An opaque value that represents a user obj"] -pub type hipUserObject_t = *mut hipUserObject; -impl hipGraphNodeType { - #[doc = "< GPU kernel node"] - pub const hipGraphNodeTypeKernel: hipGraphNodeType = hipGraphNodeType(0); -} -impl hipGraphNodeType { - #[doc = "< Memcpy node"] - pub const hipGraphNodeTypeMemcpy: hipGraphNodeType = hipGraphNodeType(1); -} -impl hipGraphNodeType { - #[doc = "< Memset node"] - pub const hipGraphNodeTypeMemset: hipGraphNodeType = hipGraphNodeType(2); -} -impl hipGraphNodeType { - #[doc = "< Host (executable) node"] - pub const hipGraphNodeTypeHost: hipGraphNodeType = hipGraphNodeType(3); -} -impl hipGraphNodeType { - #[doc = "< Node which executes an embedded graph"] - pub const hipGraphNodeTypeGraph: hipGraphNodeType = hipGraphNodeType(4); -} -impl hipGraphNodeType { - #[doc = "< Empty (no-op) node"] - pub const hipGraphNodeTypeEmpty: hipGraphNodeType = hipGraphNodeType(5); -} -impl hipGraphNodeType { - #[doc = "< External event wait node"] - pub const hipGraphNodeTypeWaitEvent: hipGraphNodeType = hipGraphNodeType(6); -} -impl hipGraphNodeType { - #[doc = "< External event record node"] - pub const hipGraphNodeTypeEventRecord: hipGraphNodeType = hipGraphNodeType(7); -} -impl hipGraphNodeType { - #[doc = "< External Semaphore signal node"] - pub const hipGraphNodeTypeExtSemaphoreSignal: hipGraphNodeType = hipGraphNodeType(8); -} -impl hipGraphNodeType { - #[doc = "< External Semaphore wait node"] - pub const hipGraphNodeTypeExtSemaphoreWait: hipGraphNodeType = hipGraphNodeType(9); -} -impl hipGraphNodeType { - #[doc = "< Memory alloc node"] - pub const hipGraphNodeTypeMemAlloc: hipGraphNodeType = hipGraphNodeType(10); -} -impl hipGraphNodeType { - #[doc = "< Memory free node"] - pub const hipGraphNodeTypeMemFree: hipGraphNodeType = hipGraphNodeType(11); -} -impl hipGraphNodeType { - #[doc = "< MemcpyFromSymbol node"] - pub const hipGraphNodeTypeMemcpyFromSymbol: hipGraphNodeType = hipGraphNodeType(12); -} -impl hipGraphNodeType { - #[doc = "< MemcpyToSymbol node"] - pub const hipGraphNodeTypeMemcpyToSymbol: hipGraphNodeType = hipGraphNodeType(13); -} -impl hipGraphNodeType { - pub const hipGraphNodeTypeCount: hipGraphNodeType = hipGraphNodeType(14); -} -#[repr(transparent)] -#[doc = " hipGraphNodeType"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGraphNodeType(pub ::std::os::raw::c_uint); -pub type hipHostFn_t = - ::std::option::Option; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipHostNodeParams { - pub fn_: hipHostFn_t, - pub userData: *mut ::std::os::raw::c_void, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipKernelNodeParams { - pub blockDim: dim3, - pub extra: *mut *mut ::std::os::raw::c_void, - pub func: *mut ::std::os::raw::c_void, - pub gridDim: dim3, - pub kernelParams: *mut *mut ::std::os::raw::c_void, - pub sharedMemBytes: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemsetParams { - pub dst: *mut ::std::os::raw::c_void, - pub elementSize: ::std::os::raw::c_uint, - pub height: usize, - pub pitch: usize, - pub value: 
::std::os::raw::c_uint, - pub width: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemAllocNodeParams { - #[doc = "< Pool properties, which contain where\n< the location should reside"] - pub poolProps: hipMemPoolProps, - #[doc = "< The number of memory access descriptors.\n< Must not be bigger than the number of GPUs"] - pub accessDescs: *const hipMemAccessDesc, - #[doc = "< The number of access descriptors"] - pub accessDescCount: usize, - #[doc = "< The size of the requested allocation in bytes"] - pub bytesize: usize, - #[doc = "< Returned device address of the allocation"] - pub dptr: *mut ::std::os::raw::c_void, -} -impl hipKernelNodeAttrID { - pub const hipKernelNodeAttributeAccessPolicyWindow: hipKernelNodeAttrID = - hipKernelNodeAttrID(1); -} -impl hipKernelNodeAttrID { - pub const hipKernelNodeAttributeCooperative: hipKernelNodeAttrID = hipKernelNodeAttrID(2); -} -#[repr(transparent)] -#[doc = " Kernel node attributeID"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipKernelNodeAttrID(pub ::std::os::raw::c_uint); -impl hipAccessProperty { - pub const hipAccessPropertyNormal: hipAccessProperty = hipAccessProperty(0); -} -impl hipAccessProperty { - pub const hipAccessPropertyStreaming: hipAccessProperty = hipAccessProperty(1); -} -impl hipAccessProperty { - pub const hipAccessPropertyPersisting: hipAccessProperty = hipAccessProperty(2); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipAccessProperty(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipAccessPolicyWindow { - pub base_ptr: *mut ::std::os::raw::c_void, - pub hitProp: hipAccessProperty, - pub hitRatio: f32, - pub missProp: hipAccessProperty, - pub num_bytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipKernelNodeAttrValue { - pub accessPolicyWindow: hipAccessPolicyWindow, - pub cooperative: ::std::os::raw::c_int, -} -#[doc = " Memset node params"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct HIP_MEMSET_NODE_PARAMS { - #[doc = "< Destination pointer on device"] - pub dst: hipDeviceptr_t, - #[doc = "< Destination device pointer pitch. Unused if height equals 1"] - pub pitch: usize, - #[doc = "< Value of memset to be set"] - pub value: ::std::os::raw::c_uint, - #[doc = "< Element in bytes. 
Must be 1, 2, or 4."] - pub elementSize: ::std::os::raw::c_uint, - #[doc = "< Width of a row"] - pub width: usize, - #[doc = "< Number of rows"] - pub height: usize, -} -impl hipGraphExecUpdateResult { - #[doc = "< The update succeeded"] - pub const hipGraphExecUpdateSuccess: hipGraphExecUpdateResult = hipGraphExecUpdateResult(0); -} -impl hipGraphExecUpdateResult { - #[doc = "< The update failed for an unexpected reason which is described\n< in the return value of the function"] - pub const hipGraphExecUpdateError: hipGraphExecUpdateResult = hipGraphExecUpdateResult(1); -} -impl hipGraphExecUpdateResult { - #[doc = "< The update failed because the topology changed"] - pub const hipGraphExecUpdateErrorTopologyChanged: hipGraphExecUpdateResult = - hipGraphExecUpdateResult(2); -} -impl hipGraphExecUpdateResult { - #[doc = "< The update failed because a node type changed"] - pub const hipGraphExecUpdateErrorNodeTypeChanged: hipGraphExecUpdateResult = - hipGraphExecUpdateResult(3); -} -impl hipGraphExecUpdateResult { - pub const hipGraphExecUpdateErrorFunctionChanged: hipGraphExecUpdateResult = - hipGraphExecUpdateResult(4); -} -impl hipGraphExecUpdateResult { - pub const hipGraphExecUpdateErrorParametersChanged: hipGraphExecUpdateResult = - hipGraphExecUpdateResult(5); -} -impl hipGraphExecUpdateResult { - pub const hipGraphExecUpdateErrorNotSupported: hipGraphExecUpdateResult = - hipGraphExecUpdateResult(6); -} -impl hipGraphExecUpdateResult { - pub const hipGraphExecUpdateErrorUnsupportedFunctionChange: hipGraphExecUpdateResult = - hipGraphExecUpdateResult(7); -} -#[repr(transparent)] -#[doc = " Graph execution update result"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGraphExecUpdateResult(pub ::std::os::raw::c_uint); -impl hipStreamCaptureMode { - pub const hipStreamCaptureModeGlobal: hipStreamCaptureMode = hipStreamCaptureMode(0); -} -impl hipStreamCaptureMode { - pub const hipStreamCaptureModeThreadLocal: hipStreamCaptureMode = hipStreamCaptureMode(1); -} -impl hipStreamCaptureMode { - pub const hipStreamCaptureModeRelaxed: hipStreamCaptureMode = hipStreamCaptureMode(2); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipStreamCaptureMode(pub ::std::os::raw::c_uint); -impl hipStreamCaptureStatus { - #[doc = "< Stream is not capturing"] - pub const hipStreamCaptureStatusNone: hipStreamCaptureStatus = hipStreamCaptureStatus(0); -} -impl hipStreamCaptureStatus { - #[doc = "< Stream is actively capturing"] - pub const hipStreamCaptureStatusActive: hipStreamCaptureStatus = hipStreamCaptureStatus(1); -} -impl hipStreamCaptureStatus { - #[doc = "< Stream is part of a capture sequence that has been\n< invalidated, but not terminated"] - pub const hipStreamCaptureStatusInvalidated: hipStreamCaptureStatus = hipStreamCaptureStatus(2); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipStreamCaptureStatus(pub ::std::os::raw::c_uint); -impl hipStreamUpdateCaptureDependenciesFlags { - #[doc = "< Add new nodes to the dependency set"] - pub const hipStreamAddCaptureDependencies: hipStreamUpdateCaptureDependenciesFlags = - hipStreamUpdateCaptureDependenciesFlags(0); -} -impl hipStreamUpdateCaptureDependenciesFlags { - #[doc = "< Replace the dependency set with the new nodes"] - pub const hipStreamSetCaptureDependencies: hipStreamUpdateCaptureDependenciesFlags = - hipStreamUpdateCaptureDependenciesFlags(1); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct 
hipStreamUpdateCaptureDependenciesFlags(pub ::std::os::raw::c_uint); -impl hipGraphMemAttributeType { - #[doc = "< Amount of memory, in bytes, currently associated with graphs"] - pub const hipGraphMemAttrUsedMemCurrent: hipGraphMemAttributeType = hipGraphMemAttributeType(0); -} -impl hipGraphMemAttributeType { - #[doc = "< High watermark of memory, in bytes, associated with graphs since the last time."] - pub const hipGraphMemAttrUsedMemHigh: hipGraphMemAttributeType = hipGraphMemAttributeType(1); -} -impl hipGraphMemAttributeType { - #[doc = "< Amount of memory, in bytes, currently allocated for graphs."] - pub const hipGraphMemAttrReservedMemCurrent: hipGraphMemAttributeType = - hipGraphMemAttributeType(2); -} -impl hipGraphMemAttributeType { - #[doc = "< High watermark of memory, in bytes, currently allocated for graphs"] - pub const hipGraphMemAttrReservedMemHigh: hipGraphMemAttributeType = - hipGraphMemAttributeType(3); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGraphMemAttributeType(pub ::std::os::raw::c_uint); -impl hipUserObjectFlags { - #[doc = "< Destructor execution is not synchronized."] - pub const hipUserObjectNoDestructorSync: hipUserObjectFlags = hipUserObjectFlags(1); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipUserObjectFlags(pub ::std::os::raw::c_uint); -impl hipUserObjectRetainFlags { - #[doc = "< Add new reference or retain."] - pub const hipGraphUserObjectMove: hipUserObjectRetainFlags = hipUserObjectRetainFlags(1); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipUserObjectRetainFlags(pub ::std::os::raw::c_uint); -impl hipGraphInstantiateFlags { - pub const hipGraphInstantiateFlagAutoFreeOnLaunch: hipGraphInstantiateFlags = - hipGraphInstantiateFlags(1); -} -impl hipGraphInstantiateFlags { - pub const hipGraphInstantiateFlagUpload: hipGraphInstantiateFlags = hipGraphInstantiateFlags(2); -} -impl hipGraphInstantiateFlags { - pub const hipGraphInstantiateFlagDeviceLaunch: hipGraphInstantiateFlags = - hipGraphInstantiateFlags(4); -} -impl hipGraphInstantiateFlags { - pub const hipGraphInstantiateFlagUseNodePriority: hipGraphInstantiateFlags = - hipGraphInstantiateFlags(8); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGraphInstantiateFlags(pub ::std::os::raw::c_uint); -impl hipGraphDebugDotFlags { - pub const hipGraphDebugDotFlagsVerbose: hipGraphDebugDotFlags = hipGraphDebugDotFlags(1); -} -impl hipGraphDebugDotFlags { - #[doc = "< Adds hipKernelNodeParams to output"] - pub const hipGraphDebugDotFlagsKernelNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(4); -} -impl hipGraphDebugDotFlags { - #[doc = "< Adds hipMemcpy3DParms to output"] - pub const hipGraphDebugDotFlagsMemcpyNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(8); -} -impl hipGraphDebugDotFlags { - #[doc = "< Adds hipMemsetParams to output"] - pub const hipGraphDebugDotFlagsMemsetNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(16); -} -impl hipGraphDebugDotFlags { - #[doc = "< Adds hipHostNodeParams to output"] - pub const hipGraphDebugDotFlagsHostNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(32); -} -impl hipGraphDebugDotFlags { - pub const hipGraphDebugDotFlagsEventNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(64); -} -impl hipGraphDebugDotFlags { - pub const hipGraphDebugDotFlagsExtSemasSignalNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(128); -} -impl 
hipGraphDebugDotFlags { - pub const hipGraphDebugDotFlagsExtSemasWaitNodeParams: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(256); -} -impl hipGraphDebugDotFlags { - pub const hipGraphDebugDotFlagsKernelNodeAttributes: hipGraphDebugDotFlags = - hipGraphDebugDotFlags(512); -} -impl hipGraphDebugDotFlags { - pub const hipGraphDebugDotFlagsHandles: hipGraphDebugDotFlags = hipGraphDebugDotFlags(1024); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipGraphDebugDotFlags(pub ::std::os::raw::c_uint); -#[doc = " Memory allocation properties"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemAllocationProp { - #[doc = "< Memory allocation type"] - pub type_: hipMemAllocationType, - #[doc = "< Requested handle type"] - pub requestedHandleType: hipMemAllocationHandleType, - #[doc = "< Memory location"] - pub location: hipMemLocation, - #[doc = "< Metadata for Win32 handles"] - pub win32HandleMetaData: *mut ::std::os::raw::c_void, - pub allocFlags: hipMemAllocationProp__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemAllocationProp__bindgen_ty_1 { - #[doc = "< Compression type"] - pub compressionType: ::std::os::raw::c_uchar, - #[doc = "< RDMA capable"] - pub gpuDirectRDMACapable: ::std::os::raw::c_uchar, - #[doc = "< Usage"] - pub usage: ::std::os::raw::c_ushort, -} -#[doc = " External semaphore signal node parameters"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreSignalNodeParams { - pub extSemArray: *mut hipExternalSemaphore_t, - pub paramsArray: *const hipExternalSemaphoreSignalParams, - pub numExtSems: ::std::os::raw::c_uint, -} -#[doc = " External semaphore wait node parameters"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipExternalSemaphoreWaitNodeParams { - pub extSemArray: *mut hipExternalSemaphore_t, - pub paramsArray: *const hipExternalSemaphoreWaitParams, - pub numExtSems: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct ihipMemGenericAllocationHandle { - _unused: [u8; 0], -} -#[doc = " Generic handle for memory allocation"] -pub type hipMemGenericAllocationHandle_t = *mut ihipMemGenericAllocationHandle; -impl hipMemAllocationGranularity_flags { - #[doc = "< Minimum granularity"] - pub const hipMemAllocationGranularityMinimum: hipMemAllocationGranularity_flags = - hipMemAllocationGranularity_flags(0); -} -impl hipMemAllocationGranularity_flags { - #[doc = "< Recommended granularity for performance"] - pub const hipMemAllocationGranularityRecommended: hipMemAllocationGranularity_flags = - hipMemAllocationGranularity_flags(1); -} -#[repr(transparent)] -#[doc = " Flags for granularity"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemAllocationGranularity_flags(pub ::std::os::raw::c_uint); -impl hipMemHandleType { - #[doc = "< Generic handle type"] - pub const hipMemHandleTypeGeneric: hipMemHandleType = hipMemHandleType(0); -} -#[repr(transparent)] -#[doc = " Memory handle type"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemHandleType(pub ::std::os::raw::c_uint); -impl hipMemOperationType { - #[doc = "< Map operation"] - pub const hipMemOperationTypeMap: hipMemOperationType = hipMemOperationType(1); -} -impl hipMemOperationType { - #[doc = "< Unmap operation"] - pub const hipMemOperationTypeUnmap: hipMemOperationType = hipMemOperationType(2); -} -#[repr(transparent)] -#[doc = " Memory operation types"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipMemOperationType(pub ::std::os::raw::c_uint); -impl 
hipArraySparseSubresourceType { - #[doc = "< Sparse level"] - pub const hipArraySparseSubresourceTypeSparseLevel: hipArraySparseSubresourceType = - hipArraySparseSubresourceType(0); -} -impl hipArraySparseSubresourceType { - #[doc = "< Miptail"] - pub const hipArraySparseSubresourceTypeMiptail: hipArraySparseSubresourceType = - hipArraySparseSubresourceType(1); -} -#[repr(transparent)] -#[doc = " Subresource types for sparse arrays"] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct hipArraySparseSubresourceType(pub ::std::os::raw::c_uint); -#[doc = " Map info for arrays"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipArrayMapInfo { - #[doc = "< Resource type"] - pub resourceType: hipResourceType, - pub resource: hipArrayMapInfo__bindgen_ty_1, - #[doc = "< Sparse subresource type"] - pub subresourceType: hipArraySparseSubresourceType, - pub subresource: hipArrayMapInfo__bindgen_ty_2, - #[doc = "< Memory operation type"] - pub memOperationType: hipMemOperationType, - #[doc = "< Memory handle type"] - pub memHandleType: hipMemHandleType, - pub memHandle: hipArrayMapInfo__bindgen_ty_3, - #[doc = "< Offset within the memory"] - pub offset: ::std::os::raw::c_ulonglong, - #[doc = "< Device ordinal bit mask"] - pub deviceBitMask: ::std::os::raw::c_uint, - #[doc = "< flags for future use, must be zero now."] - pub flags: ::std::os::raw::c_uint, - #[doc = "< Reserved for future use, must be zero now."] - pub reserved: [::std::os::raw::c_uint; 2usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipArrayMapInfo__bindgen_ty_1 { - pub mipmap: hipMipmappedArray, - pub array: hipArray_t, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipArrayMapInfo__bindgen_ty_2 { - pub sparseLevel: hipArrayMapInfo__bindgen_ty_2__bindgen_ty_1, - pub miptail: hipArrayMapInfo__bindgen_ty_2__bindgen_ty_2, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipArrayMapInfo__bindgen_ty_2__bindgen_ty_1 { - #[doc = "< For mipmapped arrays must be a valid mipmap level. For arrays must be zero"] - pub level: ::std::os::raw::c_uint, - #[doc = "< For layered arrays must be a valid layer index. Otherwise, must be zero"] - pub layer: ::std::os::raw::c_uint, - #[doc = "< X offset in elements"] - pub offsetX: ::std::os::raw::c_uint, - #[doc = "< Y offset in elements"] - pub offsetY: ::std::os::raw::c_uint, - #[doc = "< Z offset in elements"] - pub offsetZ: ::std::os::raw::c_uint, - #[doc = "< Width in elements"] - pub extentWidth: ::std::os::raw::c_uint, - #[doc = "< Height in elements"] - pub extentHeight: ::std::os::raw::c_uint, - #[doc = "< Depth in elements"] - pub extentDepth: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipArrayMapInfo__bindgen_ty_2__bindgen_ty_2 { - #[doc = "< For layered arrays must be a valid layer index. 
Otherwise, must be zero"] - pub layer: ::std::os::raw::c_uint, - #[doc = "< Offset within mip tail"] - pub offset: ::std::os::raw::c_ulonglong, - #[doc = "< Extent in bytes"] - pub size: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipArrayMapInfo__bindgen_ty_3 { - pub memHandle: hipMemGenericAllocationHandle_t, -} -#[doc = " Memcpy node params"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemcpyNodeParams { - #[doc = "< Must be zero."] - pub flags: ::std::os::raw::c_int, - #[doc = "< Must be zero."] - pub reserved: [::std::os::raw::c_int; 3usize], - #[doc = "< Params set for the memory copy."] - pub copyParams: hipMemcpy3DParms, -} -#[doc = " Child graph node params"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipChildGraphNodeParams { - #[doc = "< Either the child graph to clone into the node, or\n< a handle to the graph possesed by the node used during query"] - pub graph: hipGraph_t, -} -#[doc = " Event record node params"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipEventWaitNodeParams { - #[doc = "< Event to wait on"] - pub event: hipEvent_t, -} -#[doc = " Event record node params"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipEventRecordNodeParams { - #[doc = "< The event to be recorded when node executes"] - pub event: hipEvent_t, -} -#[doc = " Memory free node params"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipMemFreeNodeParams { - #[doc = "< the pointer to be freed"] - pub dptr: *mut ::std::os::raw::c_void, -} -#[doc = " Params for different graph nodes"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct hipGraphNodeParams { - pub type_: hipGraphNodeType, - pub reserved0: [::std::os::raw::c_int; 3usize], - pub __bindgen_anon_1: hipGraphNodeParams__bindgen_ty_1, - pub reserved2: ::std::os::raw::c_longlong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union hipGraphNodeParams__bindgen_ty_1 { - pub reserved1: [::std::os::raw::c_longlong; 29usize], - pub kernel: hipKernelNodeParams, - pub memcpy: hipMemcpyNodeParams, - pub memset: hipMemsetParams, - pub host: hipHostNodeParams, - pub graph: hipChildGraphNodeParams, - pub eventWait: hipEventWaitNodeParams, - pub eventRecord: hipEventRecordNodeParams, - pub extSemSignal: hipExternalSemaphoreSignalNodeParams, - pub extSemWait: hipExternalSemaphoreWaitNodeParams, - pub alloc: hipMemAllocNodeParams, - pub free: hipMemFreeNodeParams, -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n @defgroup API HIP API\n @{\n\n Defines the HIP API. 
See the individual sections for more information.\n/\n/**\n @defgroup Driver Initialization and Version\n @{\n This section describes the initializtion and version functions of HIP runtime API.\n\n/\n/**\n @brief Explicitly initializes the HIP runtime.\n\n @param [in] flags Initialization flag, should be zero.\n\n Most HIP APIs implicitly initialize the HIP runtime.\n This API provides control over the timing of the initialization.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipInit(flags: ::std::os::raw::c_uint) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the approximate HIP driver version.\n\n @param [out] driverVersion driver version\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning The HIP feature set does not correspond to an exact CUDA SDK driver revision.\n This function always set *driverVersion to 4 as an approximation though HIP supports\n some features which were introduced in later CUDA SDK revisions.\n HIP apps code should not rely on the driver revision number here and should\n use arch feature flags to test device capabilities or conditional compilation.\n\n @see hipRuntimeGetVersion"] - pub fn hipDriverGetVersion(driverVersion: *mut ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the approximate HIP Runtime version.\n\n @param [out] runtimeVersion HIP runtime version\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning The version definition of HIP runtime is different from CUDA.\n On AMD platform, the function returns HIP runtime version,\n while on NVIDIA platform, it returns CUDA runtime version.\n And there is no mapping/correlation between HIP version and CUDA version.\n\n @see hipDriverGetVersion"] - pub fn hipRuntimeGetVersion(runtimeVersion: *mut ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a handle to a compute device\n @param [out] device Handle of device\n @param [in] ordinal Device ordinal\n\n @returns #hipSuccess, #hipErrorInvalidDevice"] - pub fn hipDeviceGet(device: *mut hipDevice_t, ordinal: ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the compute capability of the device\n @param [out] major Major compute capability version number\n @param [out] minor Minor compute capability version number\n @param [in] device Device ordinal\n\n @returns #hipSuccess, #hipErrorInvalidDevice"] - pub fn hipDeviceComputeCapability( - major: *mut ::std::os::raw::c_int, - minor: *mut ::std::os::raw::c_int, - device: hipDevice_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns an identifer string for the device.\n @param [out] name String of the device name\n @param [in] len Maximum length of string to store in device name\n @param [in] device Device ordinal\n\n @returns #hipSuccess, #hipErrorInvalidDevice"] - pub fn hipDeviceGetName( - name: *mut ::std::os::raw::c_char, - len: ::std::os::raw::c_int, - device: hipDevice_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns an UUID for the device.[BETA]\n @param [out] uuid UUID for the device\n @param [in] device device ordinal\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue, #hipErrorNotInitialized,\n #hipErrorDeinitialized"] - pub fn hipDeviceGetUuid(uuid: *mut hipUUID, device: hipDevice_t) -> 
hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a value for attribute of link between two devices\n @param [out] value Pointer of the value for the attrubute\n @param [in] attr enum of hipDeviceP2PAttr to query\n @param [in] srcDevice The source device of the link\n @param [in] dstDevice The destination device of the link\n\n @returns #hipSuccess, #hipErrorInvalidDevice"] - pub fn hipDeviceGetP2PAttribute( - value: *mut ::std::os::raw::c_int, - attr: hipDeviceP2PAttr, - srcDevice: ::std::os::raw::c_int, - dstDevice: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a PCI Bus Id string for the device, overloaded to take int device ID.\n @param [out] pciBusId The string of PCI Bus Id format for the device\n @param [in] len Maximum length of string\n @param [in] device The device ordinal\n\n @returns #hipSuccess, #hipErrorInvalidDevice"] - pub fn hipDeviceGetPCIBusId( - pciBusId: *mut ::std::os::raw::c_char, - len: ::std::os::raw::c_int, - device: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a handle to a compute device.\n @param [out] device The handle of the device\n @param [in] pciBusId The string of PCI Bus Id for the device\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue"] - pub fn hipDeviceGetByPCIBusId( - device: *mut ::std::os::raw::c_int, - pciBusId: *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the total amount of memory on the device.\n @param [out] bytes The size of memory in bytes, on the device\n @param [in] device The ordinal of the device\n\n @returns #hipSuccess, #hipErrorInvalidDevice"] - pub fn hipDeviceTotalMem(bytes: *mut usize, device: hipDevice_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n @defgroup Device Device Management\n @{\n This section describes the device management functions of HIP runtime API.\n/\n/**\n @brief Waits on all active streams on current device\n\n When this command is invoked, the host thread gets blocked until all the commands associated\n with streams associated with the device. HIP does not support multiple blocking modes (yet!).\n\n @returns #hipSuccess\n\n @see hipSetDevice, hipDeviceReset"] - pub fn hipDeviceSynchronize() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief The state of current device is discarded and updated to a fresh state.\n\n Calling this function deletes all streams created, memory allocated, kernels running, events\n created. Make sure that no other thread is using the device or streams, memory, kernels, events\n associated with the current device.\n\n @returns #hipSuccess\n\n @see hipDeviceSynchronize"] - pub fn hipDeviceReset() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set default device to be used for subsequent hip API calls from this thread.\n\n @param[in] deviceId Valid device in range 0...hipGetDeviceCount().\n\n Sets @p device as the default device for the calling host thread. 
Valid device id's are 0...\n (hipGetDeviceCount()-1).\n\n Many HIP APIs implicitly use the \"default device\" :\n\n - Any device memory subsequently allocated from this host thread (using hipMalloc) will be\n allocated on device.\n - Any streams or events created from this host thread will be associated with device.\n - Any kernels launched from this host thread (using hipLaunchKernel) will be executed on device\n (unless a specific stream is specified, in which case the device associated with that stream will\n be used).\n\n This function may be called from any host thread. Multiple host threads may use the same device.\n This function does no synchronization with the previous or new device, and has very little\n runtime overhead. Applications can use hipSetDevice to quickly switch the default device before\n making a HIP runtime call which uses the default device.\n\n The default device is stored in thread-local-storage for each thread.\n Thread-pool implementations may inherit the default device of the previous thread. A good\n practice is to always call hipSetDevice at the start of HIP coding sequency to establish a known\n standard device.\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorNoDevice\n\n @see #hipGetDevice, #hipGetDeviceCount"] - pub fn hipSetDevice(deviceId: ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return the default device id for the calling host thread.\n\n @param [out] deviceId *device is written with the default device\n\n HIP maintains an default device for each thread using thread-local-storage.\n This device is used implicitly for HIP runtime APIs called by this thread.\n hipGetDevice returns in * @p device the default device for the calling host thread.\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see hipSetDevice, hipGetDevicesizeBytes"] - pub fn hipGetDevice(deviceId: *mut ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return number of compute-capable devices.\n\n @param [out] count Returns number of compute-capable devices.\n\n @returns #hipSuccess, #hipErrorNoDevice\n\n\n Returns in @p *count the number of devices that have ability to run compute commands. If there\n are no such devices, then @ref hipGetDeviceCount will return #hipErrorNoDevice. 
If 1 or more\n devices can be found, then hipGetDeviceCount returns #hipSuccess."] - pub fn hipGetDeviceCount(count: *mut ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query for a specific device attribute.\n\n @param [out] pi pointer to value to return\n @param [in] attr attribute to query\n @param [in] deviceId which device to query for information\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue"] - pub fn hipDeviceGetAttribute( - pi: *mut ::std::os::raw::c_int, - attr: hipDeviceAttribute_t, - deviceId: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the default memory pool of the specified device\n\n @param [out] mem_pool Default memory pool to return\n @param [in] device Device index for query the default memory pool\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipDeviceGetDefaultMemPool, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute,\n hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDeviceGetDefaultMemPool( - mem_pool: *mut hipMemPool_t, - device: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the current memory pool of a device\n\n The memory pool must be local to the specified device.\n @p hipMallocAsync allocates from the current mempool of the provided stream's device.\n By default, a device's current memory pool is its default memory pool.\n\n @note Use @p hipMallocFromPoolAsync for asynchronous memory allocations from a device\n different than the one the stream runs on.\n\n @param [in] device Device index for the update\n @param [in] mem_pool Memory pool for update as the current on the specified device\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDevice, #hipErrorNotSupported\n\n @see hipDeviceGetDefaultMemPool, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute,\n hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDeviceSetMemPool(device: ::std::os::raw::c_int, mem_pool: hipMemPool_t) - -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the current memory pool for the specified device\n\n Returns the last pool provided to @p hipDeviceSetMemPool for this device\n or the device's default memory pool if @p hipDeviceSetMemPool has never been called.\n By default the current mempool is the default mempool for a device,\n otherwise the returned pool must have been set with @p hipDeviceSetMemPool.\n\n @param [out] mem_pool Current memory pool on the specified device\n @param [in] device Device index to query the current memory pool\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipDeviceGetDefaultMemPool, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute,\n hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDeviceGetMemPool( - mem_pool: *mut hipMemPool_t, - device: 
::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns device properties.\n\n @param [out] prop written with device properties\n @param [in] deviceId which device to query for information\n\n @return #hipSuccess, #hipErrorInvalidDevice\n @bug HCC always returns 0 for maxThreadsPerMultiProcessor\n @bug HCC always returns 0 for regsPerBlock\n @bug HCC always returns 0 for l2CacheSize\n\n Populates hipGetDeviceProperties with information for the specified device."] - pub fn hipGetDevicePropertiesR0600( - prop: *mut hipDeviceProp_tR0600, - deviceId: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set L1/Shared cache partition.\n\n @param [in] cacheConfig Cache configuration\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorNotSupported\n\n Note: AMD devices do not support reconfigurable cache. This API is not implemented\n on AMD platform. If the function is called, it will return hipErrorNotSupported.\n"] - pub fn hipDeviceSetCacheConfig(cacheConfig: hipFuncCache_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get Cache configuration for a specific Device\n\n @param [out] cacheConfig Pointer of cache configuration\n\n @returns #hipSuccess, #hipErrorNotInitialized\n Note: AMD devices do not support reconfigurable cache. This hint is ignored\n on these architectures.\n"] - pub fn hipDeviceGetCacheConfig(cacheConfig: *mut hipFuncCache_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets resource limits of current device\n\n The function queries the size of limit value, as required by the input enum value hipLimit_t,\n which can be either #hipLimitStackSize, or #hipLimitMallocHeapSize. Any other input as\n default, the function will return #hipErrorUnsupportedLimit.\n\n @param [out] pValue Returns the size of the limit in bytes\n @param [in] limit The limit to query\n\n @returns #hipSuccess, #hipErrorUnsupportedLimit, #hipErrorInvalidValue\n"] - pub fn hipDeviceGetLimit(pValue: *mut usize, limit: hipLimit_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets resource limits of current device.\n\n As the input enum limit,\n #hipLimitStackSize sets the limit value of the stack size on the current GPU device, per thread.\n The limit size can get via hipDeviceGetLimit. The size is in units of 256 dwords, up to the limit\n (128K - 16).\n\n #hipLimitMallocHeapSize sets the limit value of the heap used by the malloc()/free()\n calls. 
For limit size, use the #hipDeviceGetLimit API.\n\n Any other input as default, the funtion will return hipErrorUnsupportedLimit.\n\n @param [in] limit Enum of hipLimit_t to set\n @param [in] value The size of limit value in bytes\n\n @returns #hipSuccess, #hipErrorUnsupportedLimit, #hipErrorInvalidValue\n"] - pub fn hipDeviceSetLimit(limit: hipLimit_t, value: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns bank width of shared memory for current device\n\n @param [out] pConfig The pointer of the bank width for shared memory\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized\n\n Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n"] - pub fn hipDeviceGetSharedMemConfig(pConfig: *mut hipSharedMemConfig) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the flags set for current device\n\n @param [out] flags Pointer of the flags\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue"] - pub fn hipGetDeviceFlags(flags: *mut ::std::os::raw::c_uint) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief The bank width of shared memory on current device is set\n\n @param [in] config Configuration for the bank width of shared memory\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized\n\n Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n"] - pub fn hipDeviceSetSharedMemConfig(config: hipSharedMemConfig) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief The current device behavior is changed according the flags passed.\n\n @param [in] flags Flag to set on the current device\n\n The schedule flags impact how HIP waits for the completion of a command running on a device.\n hipDeviceScheduleSpin : HIP runtime will actively spin in the thread which submitted the\n work until the command completes. This offers the lowest latency, but will consume a CPU core\n and may increase power. hipDeviceScheduleYield : The HIP runtime will yield the CPU to\n system so that other tasks can use it. This may increase latency to detect the completion but\n will consume less power and is friendlier to other tasks in the system.\n hipDeviceScheduleBlockingSync : On ROCm platform, this is a synonym for hipDeviceScheduleYield.\n hipDeviceScheduleAuto : Use a hueristic to select between Spin and Yield modes. If the\n number of HIP contexts is greater than the number of logical processors in the system, use Spin\n scheduling. Else use Yield scheduling.\n\n\n hipDeviceMapHost : Allow mapping host memory. On ROCM, this is always allowed and\n the flag is ignored. 
hipDeviceLmemResizeToMax : @warning ROCm silently ignores this flag.\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorSetOnActiveProcess\n\n"] - pub fn hipSetDeviceFlags(flags: ::std::os::raw::c_uint) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Device which matches hipDeviceProp_t is returned\n\n @param [out] device Pointer of the device\n @param [in] prop Pointer of the properties\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipChooseDeviceR0600( - device: *mut ::std::os::raw::c_int, - prop: *const hipDeviceProp_tR0600, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the link type and hop count between two devices\n\n @param [in] device1 Ordinal for device1\n @param [in] device2 Ordinal for device2\n @param [out] linktype Returns the link type (See hsa_amd_link_info_type_t) between the two devices\n @param [out] hopcount Returns the hop count between the two devices\n\n Queries and returns the HSA link type and the hop count between the two specified devices.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipExtGetLinkTypeAndHopCount( - device1: ::std::os::raw::c_int, - device2: ::std::os::raw::c_int, - linktype: *mut u32, - hopcount: *mut u32, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets an interprocess memory handle for an existing device memory\n allocation\n\n Takes a pointer to the base of an existing device memory allocation created\n with hipMalloc and exports it for use in another process. This is a\n lightweight operation and may be called multiple times on an allocation\n without adverse effects.\n\n If a region of memory is freed with hipFree and a subsequent call\n to hipMalloc returns memory with the same device address,\n hipIpcGetMemHandle will return a unique handle for the\n new memory.\n\n @param handle - Pointer to user allocated hipIpcMemHandle to return\n the handle in.\n @param devPtr - Base pointer to previously allocated device memory\n\n @returns #hipSuccess, #hipErrorInvalidHandle, #hipErrorOutOfMemory, #hipErrorMapFailed\n\n @note This IPC memory related feature API on Windows may behave differently from Linux.\n"] - pub fn hipIpcGetMemHandle( - handle: *mut hipIpcMemHandle_t, - devPtr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Opens an interprocess memory handle exported from another process\n and returns a device pointer usable in the local process.\n\n Maps memory exported from another process with hipIpcGetMemHandle into\n the current device address space. For contexts on different devices\n hipIpcOpenMemHandle can attempt to enable peer access between the\n devices as if the user called hipDeviceEnablePeerAccess. This behavior is\n controlled by the hipIpcMemLazyEnablePeerAccess flag.\n hipDeviceCanAccessPeer can determine if a mapping is possible.\n\n Contexts that may open hipIpcMemHandles are restricted in the following way.\n hipIpcMemHandles from each device in a given process may only be opened\n by one context per device per other process.\n\n Memory returned from hipIpcOpenMemHandle must be freed with\n hipIpcCloseMemHandle.\n\n Calling hipFree on an exported memory region before calling\n hipIpcCloseMemHandle in the importing context will result in undefined\n behavior.\n\n @param devPtr - Returned device pointer\n @param handle - hipIpcMemHandle to open\n @param flags - Flags for this operation. 
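A short sketch of the scheduling flags just described, under the assumption that hipDeviceScheduleYield is emitted by bindgen as a plain integer constant in this file; only the call order comes from the documentation above.

unsafe fn prefer_yield_scheduling() {
    // Trade a little wake-up latency for a free CPU core while waiting on the device.
    let _ = hipSetDeviceFlags(hipDeviceScheduleYield);
    let mut flags: ::std::os::raw::c_uint = 0;
    let _ = hipGetDeviceFlags(&mut flags);
    // `flags` now includes the scheduling bits that were just set.
}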
Must be specified as hipIpcMemLazyEnablePeerAccess\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext,\n #hipErrorInvalidDevicePointer\n\n @note During multiple processes, using the same memory handle opened by the current context,\n there is no guarantee that the same device poiter will be returned in @p *devPtr.\n This is diffrent from CUDA.\n @note This IPC memory related feature API on Windows may behave differently from Linux.\n"] - pub fn hipIpcOpenMemHandle( - devPtr: *mut *mut ::std::os::raw::c_void, - handle: hipIpcMemHandle_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Close memory mapped with hipIpcOpenMemHandle\n\n Unmaps memory returnd by hipIpcOpenMemHandle. The original allocation\n in the exporting process as well as imported mappings in other processes\n will be unaffected.\n\n Any resources used to enable peer access will be freed if this is the\n last mapping using them.\n\n @param devPtr - Device pointer returned by hipIpcOpenMemHandle\n\n @returns #hipSuccess, #hipErrorMapFailed, #hipErrorInvalidHandle\n\n @note This IPC memory related feature API on Windows may behave differently from Linux.\n"] - pub fn hipIpcCloseMemHandle(devPtr: *mut ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets an opaque interprocess handle for an event.\n\n This opaque handle may be copied into other processes and opened with hipIpcOpenEventHandle.\n Then hipEventRecord, hipEventSynchronize, hipStreamWaitEvent and hipEventQuery may be used in\n either process. Operations on the imported event after the exported event has been freed with hipEventDestroy\n will result in undefined behavior.\n\n @param[out] handle Pointer to hipIpcEventHandle to return the opaque event handle\n @param[in] event Event allocated with hipEventInterprocess and hipEventDisableTiming flags\n\n @returns #hipSuccess, #hipErrorInvalidConfiguration, #hipErrorInvalidValue\n\n @note This IPC event related feature API is currently applicable on Linux.\n"] - pub fn hipIpcGetEventHandle(handle: *mut hipIpcEventHandle_t, event: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Opens an interprocess event handles.\n\n Opens an interprocess event handle exported from another process with cudaIpcGetEventHandle. The returned\n hipEvent_t behaves like a locally created event with the hipEventDisableTiming flag specified. This event\n need be freed with hipEventDestroy. Operations on the imported event after the exported event has been freed\n with hipEventDestroy will result in undefined behavior. 
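A two-process sketch of the IPC flow described above; how the handle bytes travel between processes is left out, and the hipIpcMemLazyEnablePeerAccess constant is assumed to be generated in this file as an integer.

unsafe fn export_allocation(dev_ptr: *mut ::std::os::raw::c_void) -> hipIpcMemHandle_t {
    // Exporting process: dev_ptr must come from hipMalloc.
    let mut handle: hipIpcMemHandle_t = std::mem::zeroed();
    let _ = hipIpcGetMemHandle(&mut handle, dev_ptr);
    handle // ship these bytes to the other process
}

unsafe fn import_allocation(handle: hipIpcMemHandle_t) {
    // Importing process: map, use, then close before the exporter frees the memory.
    let mut mapped: *mut ::std::os::raw::c_void = std::ptr::null_mut();
    let _ = hipIpcOpenMemHandle(&mut mapped, handle, hipIpcMemLazyEnablePeerAccess);
    let _ = hipIpcCloseMemHandle(mapped);
}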
If the function is called within the same process where\n handle is returned by hipIpcGetEventHandle, it will return hipErrorInvalidContext.\n\n @param[out] event Pointer to hipEvent_t to return the event\n @param[in] handle The opaque interprocess handle to open\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext\n\n @note This IPC event related feature API is currently applicable on Linux.\n"] - pub fn hipIpcOpenEventHandle(event: *mut hipEvent_t, handle: hipIpcEventHandle_t) - -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n\n @defgroup Execution Execution Control\n @{\n This section describes the execution control functions of HIP runtime API.\n\n/\n/**\n @brief Set attribute for a specific function\n\n @param [in] func Pointer of the function\n @param [in] attr Attribute to set\n @param [in] value Value to set\n\n @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue\n\n Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n"] - pub fn hipFuncSetAttribute( - func: *const ::std::os::raw::c_void, - attr: hipFuncAttribute, - value: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set Cache configuration for a specific function\n\n @param [in] func Pointer of the function.\n @param [in] config Configuration to set.\n\n @returns #hipSuccess, #hipErrorNotInitialized\n Note: AMD devices and some Nvidia GPUS do not support reconfigurable cache. This hint is ignored\n on those architectures.\n"] - pub fn hipFuncSetCacheConfig( - func: *const ::std::os::raw::c_void, - config: hipFuncCache_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set shared memory configuation for a specific function\n\n @param [in] func Pointer of the function\n @param [in] config Configuration\n\n @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue\n\n Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n"] - pub fn hipFuncSetSharedMemConfig( - func: *const ::std::os::raw::c_void, - config: hipSharedMemConfig, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Error Error Handling\n @{\n This section describes the error handling functions of HIP runtime API.\n/\n/**\n @brief Return last error returned by any HIP runtime API call and resets the stored error code to\n #hipSuccess\n\n @returns return code from last HIP called from the active host thread\n\n Returns the last error that has been returned by any of the runtime calls in the same host\n thread, and then resets the saved error to #hipSuccess.\n\n @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn hipGetLastError() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return last error returned by any HIP runtime API call and resets the stored error code to\n #hipSuccess\n\n @returns return code from last HIP called from the active host thread\n\n Returns the last error that has been returned by any of the runtime calls in the same host\n thread, and then resets the saved error to #hipSuccess.\n\n @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn 
hipExtGetLastError() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return last error returned by any HIP runtime API call.\n\n @return #hipSuccess\n\n Returns the last error that has been returned by any of the runtime calls in the same host\n thread. Unlike hipGetLastError, this function does not reset the saved error code.\n\n @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn hipPeekAtLastError() -> hipError_t; -} -extern "C" { - #[doc = " @brief Return hip error as text string form.\n\n @param hip_error Error code to convert to name.\n @return const char pointer to the NULL-terminated error name\n\n @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn hipGetErrorName(hip_error: hipError_t) -> *const ::std::os::raw::c_char; -} -extern "C" { - #[doc = " @brief Return handy text string message to explain the error which occurred\n\n @param hipError Error code to convert to string.\n @return const char pointer to the NULL-terminated error string\n\n @see hipGetErrorName, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn hipGetErrorString(hipError: hipError_t) -> *const ::std::os::raw::c_char; -} -extern "C" { - #[must_use] - #[doc = " @brief Return hip error as text string form.\n\n @param [in] hipError Error code to convert to string.\n @param [out] errorString char pointer to the NULL-terminated error string\n @return #hipSuccess, #hipErrorInvalidValue\n\n @see hipGetErrorName, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn hipDrvGetErrorName( - hipError: hipError_t, - errorString: *mut *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return handy text string message to explain the error which occurred\n\n @param [in] hipError Error code to convert to string.\n @param [out] errorString char pointer to the NULL-terminated error string\n @return #hipSuccess, #hipErrorInvalidValue\n\n @see hipGetErrorName, hipGetLastError, hipPeakAtLastError, hipError_t"] - pub fn hipDrvGetErrorString( - hipError: hipError_t, - errorString: *mut *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create an asynchronous stream.\n\n @param[in, out] stream Valid pointer to hipStream_t. This function writes the memory with the\n newly created stream.\n @return #hipSuccess, #hipErrorInvalidValue\n\n Create a new asynchronous stream. @p stream returns an opaque handle that can be used to\n reference the newly created stream in subsequent hipStream* commands. The stream is allocated on\n the heap and will remain allocated even if the handle goes out-of-scope. To release the memory\n used by the stream, applicaiton must call hipStreamDestroy.\n\n @return #hipSuccess, #hipErrorInvalidValue\n\n @see hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy"] - pub fn hipStreamCreate(stream: *mut hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create an asynchronous stream.\n\n @param[in, out] stream Pointer to new stream\n @param[in ] flags to control stream creation.\n @return #hipSuccess, #hipErrorInvalidValue\n\n Create a new asynchronous stream. @p stream returns an opaque handle that can be used to\n reference the newly created stream in subsequent hipStream* commands. The stream is allocated on\n the heap and will remain allocated even if the handle goes out-of-scope. 
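A small helper sketch combining the error functions above into one readable message; it only assumes the declarations in this file plus std::ffi::CStr.

use std::ffi::CStr;

unsafe fn last_error_message() -> String {
    let err = hipGetLastError(); // also resets the stored code to hipSuccess
    let name = CStr::from_ptr(hipGetErrorName(err));
    let text = CStr::from_ptr(hipGetErrorString(err));
    format!("{}: {}", name.to_string_lossy(), text.to_string_lossy())
}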
To release the memory\n used by the stream, applicaiton must call hipStreamDestroy. Flags controls behavior of the\n stream. See #hipStreamDefault, #hipStreamNonBlocking.\n\n\n @see hipStreamCreate, hipStreamCreateWithPriority, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy"] - pub fn hipStreamCreateWithFlags( - stream: *mut hipStream_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create an asynchronous stream with the specified priority.\n\n @param[in, out] stream Pointer to new stream\n @param[in ] flags to control stream creation.\n @param[in ] priority of the stream. Lower numbers represent higher priorities.\n @return #hipSuccess, #hipErrorInvalidValue\n\n Create a new asynchronous stream with the specified priority. @p stream returns an opaque handle\n that can be used to reference the newly created stream in subsequent hipStream* commands. The\n stream is allocated on the heap and will remain allocated even if the handle goes out-of-scope.\n To release the memory used by the stream, applicaiton must call hipStreamDestroy. Flags controls\n behavior of the stream. See #hipStreamDefault, #hipStreamNonBlocking.\n\n\n @see hipStreamCreate, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy"] - pub fn hipStreamCreateWithPriority( - stream: *mut hipStream_t, - flags: ::std::os::raw::c_uint, - priority: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns numerical values that correspond to the least and greatest stream priority.\n\n @param[in, out] leastPriority pointer in which value corresponding to least priority is returned.\n @param[in, out] greatestPriority pointer in which value corresponding to greatest priority is returned.\n @returns #hipSuccess\n\n Returns in *leastPriority and *greatestPriority the numerical values that correspond to the least\n and greatest stream priority respectively. Stream priorities follow a convention where lower numbers\n imply greater priorities. The range of meaningful stream priorities is given by\n [*greatestPriority, *leastPriority]. If the user attempts to create a stream with a priority value\n that is outside the the meaningful range as specified by this API, the priority is automatically\n clamped to within the valid range."] - pub fn hipDeviceGetStreamPriorityRange( - leastPriority: *mut ::std::os::raw::c_int, - greatestPriority: *mut ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys the specified stream.\n\n @param[in] stream stream identifier.\n @return #hipSuccess #hipErrorInvalidHandle\n\n Destroys the specified stream.\n\n If commands are still executing on the specified stream, some may complete execution before the\n queue is deleted.\n\n The queue may be destroyed while some commands are still inflight, or may wait for all commands\n queued to the stream before destroying it.\n\n @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamQuery,\n hipStreamWaitEvent, hipStreamSynchronize"] - pub fn hipStreamDestroy(stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return #hipSuccess if all of the operations in the specified @p stream have completed, or\n #hipErrorNotReady if not.\n\n @param[in] stream stream to query\n\n @return #hipSuccess, #hipErrorNotReady, #hipErrorInvalidHandle\n\n This is thread-safe and returns a snapshot of the current state of the queue. 
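A sketch of the priority machinery above: query the meaningful range, then create a stream at the greatest priority. hipStream_t is assumed to be the usual opaque pointer typedef, and flags value 0 stands in for hipStreamDefault.

unsafe fn create_high_priority_stream() -> hipStream_t {
    let mut least: ::std::os::raw::c_int = 0;
    let mut greatest: ::std::os::raw::c_int = 0;
    let _ = hipDeviceGetStreamPriorityRange(&mut least, &mut greatest);
    let mut stream: hipStream_t = std::ptr::null_mut();
    // Lower numbers mean higher priority, so `greatest` is already the right end of the range.
    let _ = hipStreamCreateWithPriority(&mut stream, 0, greatest);
    stream // release with hipStreamDestroy when finished
}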
However, if other\n host threads are sending work to the stream, the status may change immediately after the function\n is called. It is typically used for debug.\n\n @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamWaitEvent,\n hipStreamSynchronize, hipStreamDestroy"] - pub fn hipStreamQuery(stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Wait for all commands in stream to complete.\n\n @param[in] stream stream identifier.\n\n @return #hipSuccess, #hipErrorInvalidHandle\n\n This command is host-synchronous : the host will block until the specified stream is empty.\n\n This command follows standard null-stream semantics. Specifically, specifying the null stream\n will cause the command to wait for other streams on the same device to complete all pending\n operations.\n\n This command honors the hipDeviceLaunchBlocking flag, which controls whether the wait is active\n or blocking.\n\n @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamWaitEvent,\n hipStreamDestroy\n"] - pub fn hipStreamSynchronize(stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Make the specified compute stream wait for an event\n\n @param[in] stream stream to make wait.\n @param[in] event event to wait on\n @param[in] flags control operation [must be 0]\n\n @return #hipSuccess, #hipErrorInvalidHandle\n\n This function inserts a wait operation into the specified stream.\n All future work submitted to @p stream will wait until @p event reports completion before\n beginning execution.\n\n This function only waits for commands in the current stream to complete. Notably,, this function\n does not impliciy wait for commands in the default stream to complete, even if the specified\n stream is created with hipStreamNonBlocking = 0.\n\n @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamSynchronize, hipStreamDestroy"] - pub fn hipStreamWaitEvent( - stream: hipStream_t, - event: hipEvent_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return flags associated with this stream.\n\n @param[in] stream stream to be queried\n @param[in,out] flags Pointer to an unsigned integer in which the stream's flags are returned\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidHandle\n\n @returns #hipSuccess #hipErrorInvalidValue #hipErrorInvalidHandle\n\n Return flags associated with this stream in *@p flags.\n\n @see hipStreamCreateWithFlags"] - pub fn hipStreamGetFlags(stream: hipStream_t, flags: *mut ::std::os::raw::c_uint) - -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query the priority of a stream.\n\n @param[in] stream stream to be queried\n @param[in,out] priority Pointer to an unsigned integer in which the stream's priority is returned\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidHandle\n\n @returns #hipSuccess #hipErrorInvalidValue #hipErrorInvalidHandle\n\n Query the priority of a stream. 
The priority is returned in in priority.\n\n @see hipStreamCreateWithFlags"] - pub fn hipStreamGetPriority( - stream: hipStream_t, - priority: *mut ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the device assocaited with the stream\n\n @param[in] stream stream to be queried\n @param[out] device device associated with the stream\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorContextIsDestroyed, #hipErrorInvalidHandle,\n #hipErrorNotInitialized, #hipErrorDeinitialized, #hipErrorInvalidContext\n\n @see hipStreamCreate, hipStreamDestroy, hipDeviceGetStreamPriorityRange"] - pub fn hipStreamGetDevice(stream: hipStream_t, device: *mut hipDevice_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create an asynchronous stream with the specified CU mask.\n\n @param[in, out] stream Pointer to new stream\n @param[in ] cuMaskSize Size of CU mask bit array passed in.\n @param[in ] cuMask Bit-vector representing the CU mask. Each active bit represents using one CU.\n The first 32 bits represent the first 32 CUs, and so on. If its size is greater than physical\n CU number (i.e., multiProcessorCount member of hipDeviceProp_t), the extra elements are ignored.\n It is user's responsibility to make sure the input is meaningful.\n @return #hipSuccess, #hipErrorInvalidHandle, #hipErrorInvalidValue\n\n Create a new asynchronous stream with the specified CU mask. @p stream returns an opaque handle\n that can be used to reference the newly created stream in subsequent hipStream* commands. The\n stream is allocated on the heap and will remain allocated even if the handle goes out-of-scope.\n To release the memory used by the stream, application must call hipStreamDestroy.\n\n\n @see hipStreamCreate, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy"] - pub fn hipExtStreamCreateWithCUMask( - stream: *mut hipStream_t, - cuMaskSize: u32, - cuMask: *const u32, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get CU mask associated with an asynchronous stream\n\n @param[in] stream stream to be queried\n @param[in] cuMaskSize number of the block of memories (uint32_t *) allocated by user\n @param[out] cuMask Pointer to a pre-allocated block of memories (uint32_t *) in which\n the stream's CU mask is returned. The CU mask is returned in a chunck of 32 bits where\n each active bit represents one active CU\n @return #hipSuccess, #hipErrorInvalidHandle, #hipErrorInvalidValue\n\n @see hipStreamCreate, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy"] - pub fn hipExtStreamGetCUMask( - stream: hipStream_t, - cuMaskSize: u32, - cuMask: *mut u32, - ) -> hipError_t; -} -#[doc = " Stream CallBack struct"] -pub type hipStreamCallback_t = ::std::option::Option< - unsafe extern "C" fn( - stream: hipStream_t, - status: hipError_t, - userData: *mut ::std::os::raw::c_void, - ), ->; -extern "C" { - #[must_use] - #[doc = " @brief Adds a callback to be called on the host after all currently enqueued\n items in the stream have completed. 
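A sketch of a host callback matching the hipStreamCallback_t type above, registered through hipStreamAddCallback (declared just below); the flags argument must be 0 per its documentation.

unsafe extern "C" fn on_stream_done(
    _stream: hipStream_t,
    _status: hipError_t,
    _user_data: *mut ::std::os::raw::c_void,
) {
    // Runs on the host exactly once, after all previously enqueued work completes.
}

unsafe fn notify_when_done(stream: hipStream_t) {
    let _ = hipStreamAddCallback(stream, Some(on_stream_done), std::ptr::null_mut(), 0);
}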
For each\n hipStreamAddCallback call, a callback will be executed exactly once.\n The callback will block later work in the stream until it is finished.\n @param[in] stream - Stream to add callback to\n @param[in] callback - The function to call once preceding stream operations are complete\n @param[in] userData - User specified data to be passed to the callback function\n @param[in] flags - Reserved for future use, must be 0\n @return #hipSuccess, #hipErrorInvalidHandle, #hipErrorNotSupported\n\n @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamQuery, hipStreamSynchronize,\n hipStreamWaitEvent, hipStreamDestroy, hipStreamCreateWithPriority\n"] - pub fn hipStreamAddCallback( - stream: hipStream_t, - callback: hipStreamCallback_t, - userData: *mut ::std::os::raw::c_void, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup StreamM Stream Memory Operations\n @{\n This section describes Stream Memory Wait and Write functions of HIP runtime API.\n/\n/**\n @brief Enqueues a wait command to the stream.[BETA]\n\n @param [in] stream - Stream identifier\n @param [in] ptr - Pointer to memory object allocated using 'hipMallocSignalMemory' flag\n @param [in] value - Value to be used in compare operation\n @param [in] flags - Defines the compare operation, supported values are hipStreamWaitValueGte\n hipStreamWaitValueEq, hipStreamWaitValueAnd and hipStreamWaitValueNor\n @param [in] mask - Mask to be applied on value at memory before it is compared with value,\n default value is set to enable every bit\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n Enqueues a wait command to the stream, all operations enqueued on this stream after this, will\n not execute until the defined wait condition is true.\n\n hipStreamWaitValueGte: waits until *ptr&mask >= value\n hipStreamWaitValueEq : waits until *ptr&mask == value\n hipStreamWaitValueAnd: waits until ((*ptr&mask) & value) != 0\n hipStreamWaitValueNor: waits until ~((*ptr&mask) | (value&mask)) != 0\n\n @note when using 'hipStreamWaitValueNor', mask is applied on both 'value' and '*ptr'.\n\n @note Support for hipStreamWaitValue32 can be queried using 'hipDeviceGetAttribute()' and\n 'hipDeviceAttributeCanUseStreamWaitValue' flag.\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipExtMallocWithFlags, hipFree, hipStreamWaitValue64, hipStreamWriteValue64,\n hipStreamWriteValue32, hipDeviceGetAttribute"] - pub fn hipStreamWaitValue32( - stream: hipStream_t, - ptr: *mut ::std::os::raw::c_void, - value: u32, - flags: ::std::os::raw::c_uint, - mask: u32, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enqueues a wait command to the stream.[BETA]\n\n @param [in] stream - Stream identifier\n @param [in] ptr - Pointer to memory object allocated using 'hipMallocSignalMemory' flag\n @param [in] value - Value to be used in compare operation\n @param [in] flags - Defines the compare operation, supported values are hipStreamWaitValueGte\n hipStreamWaitValueEq, hipStreamWaitValueAnd and hipStreamWaitValueNor.\n @param [in] mask - Mask to be applied on value at memory before it is compared with value\n default value is set to enable every bit\n\n @returns #hipSuccess, 
#hipErrorInvalidValue\n\n Enqueues a wait command to the stream, all operations enqueued on this stream after this, will\n not execute until the defined wait condition is true.\n\n hipStreamWaitValueGte: waits until *ptr&mask >= value\n hipStreamWaitValueEq : waits until *ptr&mask == value\n hipStreamWaitValueAnd: waits until ((*ptr&mask) & value) != 0\n hipStreamWaitValueNor: waits until ~((*ptr&mask) | (value&mask)) != 0\n\n @note when using 'hipStreamWaitValueNor', mask is applied on both 'value' and '*ptr'.\n\n @note Support for hipStreamWaitValue64 can be queried using 'hipDeviceGetAttribute()' and\n 'hipDeviceAttributeCanUseStreamWaitValue' flag.\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipExtMallocWithFlags, hipFree, hipStreamWaitValue32, hipStreamWriteValue64,\n hipStreamWriteValue32, hipDeviceGetAttribute"] - pub fn hipStreamWaitValue64( - stream: hipStream_t, - ptr: *mut ::std::os::raw::c_void, - value: u64, - flags: ::std::os::raw::c_uint, - mask: u64, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enqueues a write command to the stream.[BETA]\n\n @param [in] stream - Stream identifier\n @param [in] ptr - Pointer to a GPU accessible memory object\n @param [in] value - Value to be written\n @param [in] flags - reserved, ignored for now, will be used in future releases\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n Enqueues a write command to the stream, write operation is performed after all earlier commands\n on this stream have completed the execution.\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipExtMallocWithFlags, hipFree, hipStreamWriteValue32, hipStreamWaitValue32,\n hipStreamWaitValue64"] - pub fn hipStreamWriteValue32( - stream: hipStream_t, - ptr: *mut ::std::os::raw::c_void, - value: u32, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enqueues a write command to the stream.[BETA]\n\n @param [in] stream - Stream identifier\n @param [in] ptr - Pointer to a GPU accessible memory object\n @param [in] value - Value to be written\n @param [in] flags - reserved, ignored for now, will be used in future releases\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n Enqueues a write command to the stream, write operation is performed after all earlier commands\n on this stream have completed the execution.\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipExtMallocWithFlags, hipFree, hipStreamWriteValue32, hipStreamWaitValue32,\n hipStreamWaitValue64"] - pub fn hipStreamWriteValue64( - stream: hipStream_t, - ptr: *mut ::std::os::raw::c_void, - value: u64, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Event Event Management\n @{\n This section describes the event management functions of HIP runtime API.\n/\n/**\n @brief Create an event with the specified flags\n\n @param[in,out] event Returns the newly created event.\n @param[in] flags Flags to control event behavior. 
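A sketch of the beta wait/write pair above: one stream publishes a 32-bit flag and another blocks until it observes it. The numeric value of hipStreamWaitValueEq is an assumption taken from the HIP headers, and `signal` must point at memory allocated with the hipMallocSignalMemory flag.

const WAIT_VALUE_EQ: ::std::os::raw::c_uint = 0x1; // assumed value of hipStreamWaitValueEq

unsafe fn write_then_wait(
    producer: hipStream_t,
    consumer: hipStream_t,
    signal: *mut ::std::os::raw::c_void,
) {
    let _ = hipStreamWriteValue32(producer, signal, 1, 0);
    // A mask of !0 compares every bit, the default behaviour described above.
    let _ = hipStreamWaitValue32(consumer, signal, 1, WAIT_VALUE_EQ, !0u32);
}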
Valid values are #hipEventDefault,\n#hipEventBlockingSync, #hipEventDisableTiming, #hipEventInterprocess\n #hipEventDefault : Default flag. The event will use active synchronization and will support\ntiming. Blocking synchronization provides lowest possible latency at the expense of dedicating a\nCPU to poll on the event.\n #hipEventBlockingSync : The event will use blocking synchronization : if hipEventSynchronize is\ncalled on this event, the thread will block until the event completes. This can increase latency\nfor the synchroniation but can result in lower power and more resources for other CPU threads.\n #hipEventDisableTiming : Disable recording of timing information. Events created with this flag\nwould not record profiling data and provide best performance if used for synchronization.\n #hipEventInterprocess : The event can be used as an interprocess event. hipEventDisableTiming\nflag also must be set when hipEventInterprocess flag is set.\n #hipEventDisableSystemFence : Disable acquire and release system scope fence. This may\nimprove performance but device memory may not be visible to the host and other devices\nif this flag is set.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue,\n#hipErrorLaunchFailure, #hipErrorOutOfMemory\n\n @see hipEventCreate, hipEventSynchronize, hipEventDestroy, hipEventElapsedTime"] - pub fn hipEventCreateWithFlags( - event: *mut hipEvent_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " Create an event\n\n @param[in,out] event Returns the newly created event.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue,\n #hipErrorLaunchFailure, #hipErrorOutOfMemory\n\n @see hipEventCreateWithFlags, hipEventRecord, hipEventQuery, hipEventSynchronize,\n hipEventDestroy, hipEventElapsedTime"] - pub fn hipEventCreate(event: *mut hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipEventRecord(event: hipEvent_t, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroy the specified event.\n\n @param[in] event Event to destroy.\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue,\n #hipErrorLaunchFailure\n\n Releases memory associated with the event. 
If the event is recording but has not completed\n recording when hipEventDestroy() is called, the function will return immediately and the\n completion_future resources will be released later, when the hipDevice is synchronized.\n\n @see hipEventCreate, hipEventCreateWithFlags, hipEventQuery, hipEventSynchronize, hipEventRecord,\n hipEventElapsedTime\n\n @returns #hipSuccess"] - pub fn hipEventDestroy(event: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Wait for an event to complete.\n\n This function will block until the event is ready, waiting for all previous work in the stream\n specified when event was recorded with hipEventRecord().\n\n If hipEventRecord() has not been called on @p event, this function returns #hipSuccess when no\n event is captured.\n\n This function needs to support hipEventBlockingSync parameter.\n\n @param[in] event Event on which to wait.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized,\n #hipErrorInvalidHandle, #hipErrorLaunchFailure\n\n @see hipEventCreate, hipEventCreateWithFlags, hipEventQuery, hipEventDestroy, hipEventRecord,\n hipEventElapsedTime"] - pub fn hipEventSynchronize(event: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return the elapsed time between two events.\n\n @param[out] ms : Return time between start and stop in ms.\n @param[in] start : Start event.\n @param[in] stop : Stop event.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotReady, #hipErrorInvalidHandle,\n #hipErrorNotInitialized, #hipErrorLaunchFailure\n\n Computes the elapsed time between two events. Time is computed in ms, with\n a resolution of approximately 1 us.\n\n Events which are recorded in a NULL stream will block until all commands\n on all other streams complete execution, and then record the timestamp.\n\n Events which are recorded in a non-NULL stream will record their timestamp\n when they reach the head of the specified stream, after all previous\n commands in that stream have completed executing. Thus the time that\n the event recorded may be significantly after the host calls hipEventRecord().\n\n If hipEventRecord() has not been called on either event, then #hipErrorInvalidHandle is\n returned. If hipEventRecord() has been called on both events, but the timestamp has not yet been\n recorded on one or both events (that is, hipEventQuery() would return #hipErrorNotReady on at\n least one of the events), then #hipErrorNotReady is returned.\n\n @see hipEventCreate, hipEventCreateWithFlags, hipEventQuery, hipEventDestroy, hipEventRecord,\n hipEventSynchronize"] - pub fn hipEventElapsedTime(ms: *mut f32, start: hipEvent_t, stop: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query event status\n\n @param[in] event Event to query.\n @returns #hipSuccess, #hipErrorNotReady, #hipErrorInvalidHandle, #hipErrorInvalidValue,\n #hipErrorNotInitialized, #hipErrorLaunchFailure\n\n Query the status of the specified event. This function will return #hipSuccess if all\n commands in the appropriate stream (specified to hipEventRecord()) have completed. 
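A timing sketch using the event API above; hipEvent_t is assumed to be an opaque pointer typedef, and the default flags (value 0, i.e. hipEventDefault) keep timing enabled.

unsafe fn time_stream_work(stream: hipStream_t) -> f32 {
    let mut start: hipEvent_t = std::ptr::null_mut();
    let mut stop: hipEvent_t = std::ptr::null_mut();
    let _ = hipEventCreate(&mut start);
    let _ = hipEventCreate(&mut stop);
    let _ = hipEventRecord(start, stream);
    // ... enqueue the kernels / copies to be measured on `stream` here ...
    let _ = hipEventRecord(stop, stream);
    let _ = hipEventSynchronize(stop); // wait until `stop` has actually been recorded
    let mut ms = 0.0f32;
    let _ = hipEventElapsedTime(&mut ms, start, stop);
    let _ = hipEventDestroy(start);
    let _ = hipEventDestroy(stop);
    ms
}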
If any execution\n has not completed, then #hipErrorNotReady is returned.\n\n @note: This API returns #hipSuccess, if hipEventRecord() is not called before this API.\n\n @see hipEventCreate, hipEventCreateWithFlags, hipEventRecord, hipEventDestroy,\n hipEventSynchronize, hipEventElapsedTime"] - pub fn hipEventQuery(event: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets information on the specified pointer.[BETA]\n\n @param [in] value Sets pointer attribute value\n @param [in] attribute Attribute to set\n @param [in] ptr Pointer to set attributes for\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipPointerSetAttribute( - value: *const ::std::os::raw::c_void, - attribute: hipPointer_attribute, - ptr: hipDeviceptr_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns attributes for the specified pointer\n\n @param [out] attributes attributes for the specified pointer\n @param [in] ptr pointer to get attributes for\n\n The output parameter 'attributes' has a member named 'type' that describes what memory the\n pointer is associated with, such as device memory, host memory, managed memory, and others.\n Otherwise, the API cannot handle the pointer and returns #hipErrorInvalidValue.\n\n @note The unrecognized memory type is unsupported to keep the HIP functionality backward\n compatibility due to #hipMemoryType enum values.\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @note The current behavior of this HIP API corresponds to the CUDA API before version 11.0.\n\n @see hipPointerGetAttribute"] - pub fn hipPointerGetAttributes( - attributes: *mut hipPointerAttribute_t, - ptr: *const ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns information about the specified pointer.[BETA]\n\n @param [in, out] data Returned pointer attribute value\n @param [in] attribute Attribute to query for\n @param [in] ptr Pointer to get attributes for\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipPointerGetAttributes"] - pub fn hipPointerGetAttribute( - data: *mut ::std::os::raw::c_void, - attribute: hipPointer_attribute, - ptr: hipDeviceptr_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns information about the specified pointer.[BETA]\n\n @param [in] numAttributes number of attributes to query for\n @param [in] attributes attributes to query for\n @param [in, out] data a two-dimensional containing pointers to memory locations\n where the result of each attribute query will be written to\n @param [in] ptr pointer to get attributes for\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipPointerGetAttribute"] - pub fn hipDrvPointerGetAttributes( - numAttributes: ::std::os::raw::c_uint, - attributes: *mut hipPointer_attribute, - data: *mut *mut ::std::os::raw::c_void, - ptr: hipDeviceptr_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = 
"-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup External External Resource Interoperability\n @{\n @ingroup API\n\n This section describes the external resource interoperability functions of HIP runtime API.\n\n/\n/**\n @brief Imports an external semaphore.\n\n @param[out] extSem_out External semaphores to be waited on\n @param[in] semHandleDesc Semaphore import handle descriptor\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipImportExternalSemaphore( - extSem_out: *mut hipExternalSemaphore_t, - semHandleDesc: *const hipExternalSemaphoreHandleDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Signals a set of external semaphore objects.\n\n @param[in] extSemArray External semaphores to be waited on\n @param[in] paramsArray Array of semaphore parameters\n @param[in] numExtSems Number of semaphores to wait on\n @param[in] stream Stream to enqueue the wait operations in\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipSignalExternalSemaphoresAsync( - extSemArray: *const hipExternalSemaphore_t, - paramsArray: *const hipExternalSemaphoreSignalParams, - numExtSems: ::std::os::raw::c_uint, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Waits on a set of external semaphore objects\n\n @param[in] extSemArray External semaphores to be waited on\n @param[in] paramsArray Array of semaphore parameters\n @param[in] numExtSems Number of semaphores to wait on\n @param[in] stream Stream to enqueue the wait operations in\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipWaitExternalSemaphoresAsync( - extSemArray: *const hipExternalSemaphore_t, - paramsArray: *const hipExternalSemaphoreWaitParams, - numExtSems: ::std::os::raw::c_uint, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys an external semaphore object and releases any references to the underlying resource. 
Any outstanding signals or waits must have completed before the semaphore is destroyed.\n\n @param[in] extSem handle to an external memory object\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipDestroyExternalSemaphore(extSem: hipExternalSemaphore_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Imports an external memory object.\n\n @param[out] extMem_out Returned handle to an external memory object\n @param[in] memHandleDesc Memory import handle descriptor\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipImportExternalMemory( - extMem_out: *mut hipExternalMemory_t, - memHandleDesc: *const hipExternalMemoryHandleDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Maps a buffer onto an imported memory object.\n\n @param[out] devPtr Returned device pointer to buffer\n @param[in] extMem Handle to external memory object\n @param[in] bufferDesc Buffer descriptor\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipExternalMemoryGetMappedBuffer( - devPtr: *mut *mut ::std::os::raw::c_void, - extMem: hipExternalMemory_t, - bufferDesc: *const hipExternalMemoryBufferDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys an external memory object.\n\n @param[in] extMem External memory object to be destroyed\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] - pub fn hipDestroyExternalMemory(extMem: hipExternalMemory_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Maps a mipmapped array onto an external memory object.\n\n @param[out] mipmap mipmapped array to return\n @param[in] extMem external memory object handle\n @param[in] mipmapDesc external mipmapped array descriptor\n\n Returned mipmapped array must be freed using hipFreeMipmappedArray.\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidResourceHandle\n\n @see hipImportExternalMemory, hipDestroyExternalMemory, hipExternalMemoryGetMappedBuffer, hipFreeMipmappedArray"] - pub fn hipExternalMemoryGetMappedMipmappedArray( - mipmap: *mut hipMipmappedArray_t, - extMem: hipExternalMemory_t, - mipmapDesc: *const hipExternalMemoryMipmappedArrayDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n @brief Allocate memory on the default accelerator\n\n @param[out] ptr Pointer to the allocated memory\n @param[in] size Requested memory size\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n\n @return #hipSuccess, #hipErrorOutOfMemory, #hipErrorInvalidValue (bad context, null *ptr)\n\n @see hipMallocPitch, hipFree, hipMallocArray, hipFreeArray, hipMalloc3D, hipMalloc3DArray,\n hipHostFree, hipHostMalloc"] - pub fn hipMalloc(ptr: *mut *mut ::std::os::raw::c_void, size: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate memory on the default accelerator\n\n @param[out] ptr Pointer to the allocated memory\n @param[in] sizeBytes Requested memory size\n @param[in] flags Type of memory allocation\n\n If requested memory size is 0, no memory is allocated, *ptr returns nullptr, and #hipSuccess\n is returned.\n\n The memory allocation flag should be either #hipDeviceMallocDefault,\n #hipDeviceMallocFinegrained, #hipDeviceMallocUncached, or #hipMallocSignalMemory.\n If the flag is any other value, the API returns #hipErrorInvalidValue.\n\n @return #hipSuccess, #hipErrorOutOfMemory, #hipErrorInvalidValue 
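A plain device allocation sketch for the hipMalloc binding above; hipFree is not shown in this excerpt but is assumed to be declared elsewhere in the file. A zero size is documented to succeed and yield a null pointer.

unsafe fn alloc_device(bytes: usize) -> *mut ::std::os::raw::c_void {
    let mut ptr: *mut ::std::os::raw::c_void = std::ptr::null_mut();
    let _ = hipMalloc(&mut ptr, bytes);
    ptr // pass back to hipFree once the device work using it has finished
}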
(bad context, null *ptr)\n\n @see hipMallocPitch, hipFree, hipMallocArray, hipFreeArray, hipMalloc3D, hipMalloc3DArray,\n hipHostFree, hipHostMalloc"] - pub fn hipExtMallocWithFlags( - ptr: *mut *mut ::std::os::raw::c_void, - sizeBytes: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate pinned host memory [Deprecated]\n\n @param[out] ptr Pointer to the allocated host pinned memory\n @param[in] size Requested memory size\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @warning This API is deprecated, use hipHostMalloc() instead"] - pub fn hipMallocHost(ptr: *mut *mut ::std::os::raw::c_void, size: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate pinned host memory [Deprecated]\n\n @param[out] ptr Pointer to the allocated host pinned memory\n @param[in] size Requested memory size\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @warning This API is deprecated, use hipHostMalloc() instead"] - pub fn hipMemAllocHost(ptr: *mut *mut ::std::os::raw::c_void, size: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocates device accessible page locked (pinned) host memory\n\n This API allocates pinned host memory which is mapped into the address space of all GPUs\n in the system, the memory can be accessed directly by the GPU device, and can be read or\n written with much higher bandwidth than pageable memory obtained with functions such as\n malloc().\n\n Using the pinned host memory, applications can implement faster data transfers for HostToDevice\n and DeviceToHost. The runtime tracks the hipHostMalloc allocations and can avoid some of the\n setup required for regular unpinned memory.\n\n When the memory accesses are infrequent, zero-copy memory can be a good choice, for coherent\n allocation. 
GPU can directly access the host memory over the CPU/GPU interconnect, without need\n to copy the data.\n\n Currently the allocation granularity is 4KB for the API.\n\n Developers need to choose proper allocation flag with consideration of synchronization.\n\n @param[out] ptr Pointer to the allocated host pinned memory\n @param[in] size Requested memory size in bytes\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n @param[in] flags Type of host memory allocation\n\n If no input for flags, it will be the default pinned memory allocation on the host.\n\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @see hipSetDeviceFlags, hipHostFree"] - pub fn hipHostMalloc( - ptr: *mut *mut ::std::os::raw::c_void, - size: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = "-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup MemoryM Managed Memory\n\n @ingroup Memory\n @{\n This section describes the managed memory management functions of HIP runtime API.\n\n @note The managed memory management APIs are implemented on Linux, under developement\n on Windows.\n\n/\n/**\n @brief Allocates memory that will be automatically managed by HIP.\n\n This API is used for managed memory, allows data be shared and accessible to both CPU and\n GPU using a single pointer.\n\n The API returns the allocation pointer, managed by HMM, can be used further to execute kernels\n on device and fetch data between the host and device as needed.\n\n @note It is recommend to do the capability check before call this API.\n\n @param [out] dev_ptr - pointer to allocated device memory\n @param [in] size - requested allocation size in bytes, it should be granularity of 4KB\n @param [in] flags - must be either hipMemAttachGlobal or hipMemAttachHost\n (defaults to hipMemAttachGlobal)\n\n @returns #hipSuccess, #hipErrorMemoryAllocation, #hipErrorNotSupported, #hipErrorInvalidValue\n"] - pub fn hipMallocManaged( - dev_ptr: *mut *mut ::std::os::raw::c_void, - size: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Prefetches memory to the specified destination device using HIP.\n\n @param [in] dev_ptr pointer to be prefetched\n @param [in] count size in bytes for prefetching\n @param [in] device destination device to prefetch to\n @param [in] stream stream to enqueue prefetch operation\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPrefetchAsync( - dev_ptr: *const ::std::os::raw::c_void, - count: usize, - device: ::std::os::raw::c_int, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Advise about the usage of a given memory range to HIP.\n\n @param [in] dev_ptr pointer to memory to set the advice for\n @param [in] count size in bytes of the memory range, it should be CPU page size alligned.\n @param [in] advice advice to be applied for the specified memory range\n @param [in] device device to apply the advice for\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n This HIP API advises about the usage to be applied on unified memory allocation in the\n range starting from the pointer address devPtr, with the size of count bytes.\n The memory range must refer to managed memory allocated via the API 
hipMallocManaged, and the\n range will be handled with proper round down and round up respectively in the driver to\n be aligned to CPU page size, the same way as corresponding CUDA API behaves in CUDA version 8.0\n and afterwards.\n\n @note This API is implemented on Linux and is under development on Windows."] - pub fn hipMemAdvise( - dev_ptr: *const ::std::os::raw::c_void, - count: usize, - advice: hipMemoryAdvise, - device: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query an attribute of a given memory range in HIP.\n\n @param [in,out] data a pointer to a memory location where the result of each\n attribute query will be written to\n @param [in] data_size the size of data\n @param [in] attribute the attribute to query\n @param [in] dev_ptr start of the range to query\n @param [in] count size of the range to query\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemRangeGetAttribute( - data: *mut ::std::os::raw::c_void, - data_size: usize, - attribute: hipMemRangeAttribute, - dev_ptr: *const ::std::os::raw::c_void, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query attributes of a given memory range in HIP.\n\n @param [in,out] data a two-dimensional array containing pointers to memory locations\n where the result of each attribute query will be written to\n @param [in] data_sizes an array, containing the sizes of each result\n @param [in] attributes the attribute to query\n @param [in] num_attributes an array of attributes to query (numAttributes and the number\n of attributes in this array should match)\n @param [in] dev_ptr start of the range to query\n @param [in] count size of the range to query\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemRangeGetAttributes( - data: *mut *mut ::std::os::raw::c_void, - data_sizes: *mut usize, - attributes: *mut hipMemRangeAttribute, - num_attributes: usize, - dev_ptr: *const ::std::os::raw::c_void, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Attach memory to a stream asynchronously in HIP.\n\n @param [in] stream - stream in which to enqueue the attach operation\n @param [in] dev_ptr - pointer to memory (must be a pointer to managed memory or\n to a valid host-accessible region of system-allocated memory)\n @param [in] length - length of memory (defaults to zero)\n @param [in] flags - must be one of hipMemAttachGlobal, hipMemAttachHost or\n hipMemAttachSingle (defaults to hipMemAttachSingle)\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipStreamAttachMemAsync( - stream: hipStream_t, - dev_ptr: *mut ::std::os::raw::c_void, - length: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocates memory with stream ordered semantics\n\n Inserts a memory allocation operation into @p stream.\n A pointer to the allocated memory is returned immediately in *dptr.\n The allocation must not be accessed until the the allocation operation completes.\n The allocation comes from the memory pool associated with the stream's device.\n\n @note The default memory pool of a device contains device memory from that device.\n @note Basic stream ordering allows future work submitted into the same stream to use the\n 
allocation. Stream query, stream synchronize, and HIP events can be used to guarantee that\n the allocation operation completes before work submitted in a separate stream runs.\n @note During stream capture, this function results in the creation of an allocation node.\n In this case, the allocation is owned by the graph instead of the memory pool. The memory\n pool's properties are used to set the node's creation parameters.\n\n @param [out] dev_ptr Returned device pointer of memory allocation\n @param [in] size Number of bytes to allocate\n @param [in] stream The stream establishing the stream ordering contract and\n the memory pool to allocate from\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported, #hipErrorOutOfMemory\n\n @see hipMallocFromPoolAsync, hipFreeAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute,\n hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMallocAsync( - dev_ptr: *mut *mut ::std::os::raw::c_void, - size: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Frees memory with stream ordered semantics\n\n Inserts a free operation into @p stream.\n The allocation must not be used after stream execution reaches the free.\n After this API returns, accessing the memory from any subsequent work launched on the GPU\n or querying its pointer attributes results in undefined behavior.\n\n @note During stream capture, this function results in the creation of a free node and\n must therefore be passed the address of a graph allocation.\n\n @param [in] dev_ptr Pointer to device memory to free\n @param [in] stream The stream, where the destruciton will occur according to the execution order\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute,\n hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipFreeAsync(dev_ptr: *mut ::std::os::raw::c_void, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Releases freed memory back to the OS\n\n Releases memory back to the OS until the pool contains fewer than @p min_bytes_to_keep\n reserved bytes, or there is no more memory that the allocator can safely release.\n The allocator cannot release OS allocations that back outstanding asynchronous allocations.\n The OS allocations may happen at different granularity from the user allocations.\n\n @note: Allocations that have not been freed count as outstanding.\n @note: Allocations that have been asynchronously freed but whose completion has\n not been observed on the host (eg. by a synchronize) can count as outstanding.\n\n @param[in] mem_pool The memory pool to trim allocations\n @param[in] min_bytes_to_hold If the pool has less than min_bytes_to_hold reserved,\n then the TrimTo operation is a no-op. 
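A sketch of the stream-ordered allocate/free pair above: the buffer is only valid for work enqueued on `stream` between the two calls, and the host synchronizes before reusing anything.

unsafe fn stream_ordered_scratch(stream: hipStream_t, bytes: usize) {
    let mut dev_ptr: *mut ::std::os::raw::c_void = std::ptr::null_mut();
    let _ = hipMallocAsync(&mut dev_ptr, bytes, stream);
    // ... enqueue work on `stream` that reads and writes `dev_ptr` ...
    let _ = hipFreeAsync(dev_ptr, stream);
    let _ = hipStreamSynchronize(stream); // guarantees the free has executed
}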
Otherwise the memory pool will contain\n at least min_bytes_to_hold bytes reserved after the operation.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute,\n hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolTrimTo(mem_pool: hipMemPool_t, min_bytes_to_hold: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets attributes of a memory pool\n\n Supported attributes are:\n - @p hipMemPoolAttrReleaseThreshold: (value type = cuuint64_t)\n Amount of reserved memory in bytes to hold onto before trying\n to release memory back to the OS. When more than the release\n threshold bytes of memory are held by the memory pool, the\n allocator will try to release memory back to the OS on the\n next call to stream, event or context synchronize. (default 0)\n - @p hipMemPoolReuseFollowEventDependencies: (value type = int)\n Allow @p hipMallocAsync to use memory asynchronously freed\n in another stream as long as a stream ordering dependency\n of the allocating stream on the free action exists.\n HIP events and null stream interactions can create the required\n stream ordered dependencies. (default enabled)\n - @p hipMemPoolReuseAllowOpportunistic: (value type = int)\n Allow reuse of already completed frees when there is no dependency\n between the free and allocation. (default enabled)\n - @p hipMemPoolReuseAllowInternalDependencies: (value type = int)\n Allow @p hipMallocAsync to insert new stream dependencies\n in order to establish the stream ordering required to reuse\n a piece of memory released by @p hipFreeAsync (default enabled).\n\n @param [in] mem_pool The memory pool to modify\n @param [in] attr The attribute to modify\n @param [in] value Pointer to the value to assign\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute,\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolSetAttribute( - mem_pool: hipMemPool_t, - attr: hipMemPoolAttr, - value: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets attributes of a memory pool\n\n Supported attributes are:\n - @p hipMemPoolAttrReleaseThreshold: (value type = cuuint64_t)\n Amount of reserved memory in bytes to hold onto before trying\n to release memory back to the OS. When more than the release\n threshold bytes of memory are held by the memory pool, the\n allocator will try to release memory back to the OS on the\n next call to stream, event or context synchronize. (default 0)\n - @p hipMemPoolReuseFollowEventDependencies: (value type = int)\n Allow @p hipMallocAsync to use memory asynchronously freed\n in another stream as long as a stream ordering dependency\n of the allocating stream on the free action exists.\n HIP events and null stream interactions can create the required\n stream ordered dependencies. 
(default enabled)\n - @p hipMemPoolReuseAllowOpportunistic: (value type = int)\n Allow reuse of already completed frees when there is no dependency\n between the free and allocation. (default enabled)\n - @p hipMemPoolReuseAllowInternalDependencies: (value type = int)\n Allow @p hipMallocAsync to insert new stream dependencies\n in order to establish the stream ordering required to reuse\n a piece of memory released by @p hipFreeAsync (default enabled).\n\n @param [in] mem_pool The memory pool to get attributes of\n @param [in] attr The attribute to get\n @param [in] value Retrieved value\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync,\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolGetAttribute( - mem_pool: hipMemPool_t, - attr: hipMemPoolAttr, - value: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Controls visibility of the specified pool between devices\n\n @param [in] mem_pool Memory pool for acccess change\n @param [in] desc_list Array of access descriptors. Each descriptor instructs the access to enable for a single gpu\n @param [in] count Number of descriptors in the map array.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute,\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolSetAccess( - mem_pool: hipMemPool_t, - desc_list: *const hipMemAccessDesc, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the accessibility of a pool from a device\n\n Returns the accessibility of the pool's memory from the specified location.\n\n @param [out] flags Accessibility of the memory pool from the specified location/device\n @param [in] mem_pool Memory pool being queried\n @param [in] location Location/device for memory pool access\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute,\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolGetAccess( - flags: *mut hipMemAccessFlags, - mem_pool: hipMemPool_t, - location: *mut hipMemLocation, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memory pool\n\n Creates a HIP memory pool and returns the handle in @p mem_pool. 
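As a quick illustration of the release-threshold attribute described above, a hedged sketch that asks a pool to hold on to up to 64 MiB before returning memory to the OS; the hipMemPoolAttr variant path (and the other names) are assumed as in the earlier sketch.

    use hip_runtime_sys::*; // crate name assumed

    unsafe fn keep_pool_memory(pool: hipMemPool_t) -> hipError_t {
        // hipMemPoolAttrReleaseThreshold takes a 64-bit byte count passed through a void pointer.
        let mut threshold: u64 = 64 * 1024 * 1024;
        hipMemPoolSetAttribute(
            pool,
            hipMemPoolAttr::hipMemPoolAttrReleaseThreshold, // attribute named in the doc comment; enum path assumed
            &mut threshold as *mut u64 as *mut ::std::os::raw::c_void,
        )
    }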
The @p pool_props determines\n the properties of the pool such as the backing device and IPC capabilities.\n\n By default, the memory pool will be accessible from the device it is allocated on.\n\n @param [out] mem_pool Contains createed memory pool\n @param [in] pool_props Memory pool properties\n\n @note Specifying hipMemHandleTypeNone creates a memory pool that will not support IPC.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, hipMemPoolDestroy,\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolCreate( - mem_pool: *mut hipMemPool_t, - pool_props: *const hipMemPoolProps, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys the specified memory pool\n\n If any pointers obtained from this pool haven't been freed or\n the pool has free operations that haven't completed\n when @p hipMemPoolDestroy is invoked, the function will return immediately and the\n resources associated with the pool will be released automatically\n once there are no more outstanding allocations.\n\n Destroying the current mempool of a device sets the default mempool of\n that device as the current mempool for that device.\n\n @param [in] mem_pool Memory pool for destruction\n\n @note A device's default memory pool cannot be destroyed.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, hipMemPoolCreate\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolDestroy(mem_pool: hipMemPool_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocates memory from a specified pool with stream ordered semantics.\n\n Inserts an allocation operation into @p stream.\n A pointer to the allocated memory is returned immediately in @p dev_ptr.\n The allocation must not be accessed until the the allocation operation completes.\n The allocation comes from the specified memory pool.\n\n @note The specified memory pool may be from a device different than that of the specified @p stream.\n\n Basic stream ordering allows future work submitted into the same stream to use the allocation.\n Stream query, stream synchronize, and HIP events can be used to guarantee that the allocation\n operation completes before work submitted in a separate stream runs.\n\n @note During stream capture, this function results in the creation of an allocation node. In this case,\n the allocation is owned by the graph instead of the memory pool. 
The memory pool's properties\n are used to set the node's creation parameters.\n\n @param [out] dev_ptr Returned device pointer\n @param [in] size Number of bytes to allocate\n @param [in] mem_pool The pool to allocate from\n @param [in] stream The stream establishing the stream ordering semantic\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported, #hipErrorOutOfMemory\n\n @see hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, hipMemPoolCreate\n hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess,\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMallocFromPoolAsync( - dev_ptr: *mut *mut ::std::os::raw::c_void, - size: usize, - mem_pool: hipMemPool_t, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Exports a memory pool to the requested handle type.\n\n Given an IPC capable mempool, create an OS handle to share the pool with another process.\n A recipient process can convert the shareable handle into a mempool with @p hipMemPoolImportFromShareableHandle.\n Individual pointers can then be shared with the @p hipMemPoolExportPointer and @p hipMemPoolImportPointer APIs.\n The implementation of what the shareable handle is and how it can be transferred is defined by the requested\n handle type.\n\n @note: To create an IPC capable mempool, create a mempool with a @p hipMemAllocationHandleType other\n than @p hipMemHandleTypeNone.\n\n @param [out] shared_handle Pointer to the location in which to store the requested handle\n @param [in] mem_pool Pool to export\n @param [in] handle_type The type of handle to create\n @param [in] flags Must be 0\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory\n\n @see hipMemPoolImportFromShareableHandle\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolExportToShareableHandle( - shared_handle: *mut ::std::os::raw::c_void, - mem_pool: hipMemPool_t, - handle_type: hipMemAllocationHandleType, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Imports a memory pool from a shared handle.\n\n Specific allocations can be imported from the imported pool with @p hipMemPoolImportPointer.\n\n @note Imported memory pools do not support creating new allocations.\n As such imported memory pools may not be used in @p hipDeviceSetMemPool\n or @p hipMallocFromPoolAsync calls.\n\n @param [out] mem_pool Returned memory pool\n @param [in] shared_handle OS handle of the pool to open\n @param [in] handle_type The type of handle being imported\n @param [in] flags Must be 0\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory\n\n @see hipMemPoolExportToShareableHandle\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolImportFromShareableHandle( - mem_pool: *mut hipMemPool_t, - shared_handle: *mut ::std::os::raw::c_void, - handle_type: hipMemAllocationHandleType, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern 
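A sketch combining hipMemPoolCreate and hipMallocFromPoolAsync from the declarations above. The zeroed hipMemPoolProps is only a placeholder: real code must fill in the allocation-type, handle-type and location fields, whose names are not shown in this hunk.

    use hip_runtime_sys::*; // crate name assumed

    unsafe fn alloc_from_new_pool(stream: hipStream_t) -> hipError_t {
        let props: hipMemPoolProps = ::std::mem::zeroed(); // placeholder only, see note above
        let mut pool: hipMemPool_t = ::std::mem::zeroed();
        let err = hipMemPoolCreate(&mut pool, &props);
        if err != hipError_t::hipSuccess { // success constant name assumed
            return err;
        }
        let mut buf: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
        // The allocation is ordered in `stream` but served from `pool` rather than
        // the device's default pool.
        let err = hipMallocFromPoolAsync(&mut buf, 4096, pool, stream);
        if err != hipError_t::hipSuccess {
            return err;
        }
        hipFreeAsync(buf, stream)
    }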
"C" { - #[must_use] - #[doc = " @brief Export data to share a memory pool allocation between processes.\n\n Constructs @p export_data for sharing a specific allocation from an already shared memory pool.\n The recipient process can import the allocation with the @p hipMemPoolImportPointer api.\n The data is not a handle and may be shared through any IPC mechanism.\n\n @param[out] export_data Returned export data\n @param[in] dev_ptr Pointer to memory being exported\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory\n\n @see hipMemPoolImportPointer\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolExportPointer( - export_data: *mut hipMemPoolPtrExportData, - dev_ptr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Import a memory pool allocation from another process.\n\n Returns in @p dev_ptr a pointer to the imported memory.\n The imported memory must not be accessed before the allocation operation completes\n in the exporting process. The imported memory must be freed from all importing processes before\n being freed in the exporting process. The pointer may be freed with @p hipFree\n or @p hipFreeAsync. If @p hipFreeAsync is used, the free must be completed\n on the importing process before the free operation on the exporting process.\n\n @note The @p hipFreeAsync api may be used in the exporting process before\n the @p hipFreeAsync operation completes in its stream as long as the\n @p hipFreeAsync in the exporting process specifies a stream with\n a stream dependency on the importing process's @p hipFreeAsync.\n\n @param [out] dev_ptr Pointer to imported memory\n @param [in] mem_pool Memory pool from which to import a pointer\n @param [in] export_data Data specifying the memory to import\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized, #hipErrorOutOfMemory\n\n @see hipMemPoolExportPointer\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemPoolImportPointer( - dev_ptr: *mut *mut ::std::os::raw::c_void, - mem_pool: hipMemPool_t, - export_data: *mut hipMemPoolPtrExportData, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate device accessible page locked host memory [Deprecated]\n\n @param[out] ptr Pointer to the allocated host pinned memory\n @param[in] size Requested memory size in bytes\n @param[in] flags Type of host memory allocation\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @warning This API is deprecated, use hipHostMalloc() instead"] - pub fn hipHostAlloc( - ptr: *mut *mut ::std::os::raw::c_void, - size: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get Device pointer from Host Pointer allocated through hipHostMalloc\n\n @param[out] devPtr Device Pointer mapped to passed host pointer\n @param[in] hstPtr Host Pointer allocated through hipHostMalloc\n @param[in] flags Flags to be passed for extension\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory\n\n @see hipSetDeviceFlags, hipHostMalloc"] - pub fn 
hipHostGetDevicePointer( - devPtr: *mut *mut ::std::os::raw::c_void, - hstPtr: *mut ::std::os::raw::c_void, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return flags associated with host pointer\n\n @param[out] flagsPtr Memory location to store flags\n @param[in] hostPtr Host Pointer allocated through hipHostMalloc\n @return #hipSuccess, #hipErrorInvalidValue\n\n @see hipHostMalloc"] - pub fn hipHostGetFlags( - flagsPtr: *mut ::std::os::raw::c_uint, - hostPtr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Register host memory so it can be accessed from the current device.\n\n @param[out] hostPtr Pointer to host memory to be registered.\n @param[in] sizeBytes Size of the host memory\n @param[in] flags See below.\n\n Flags:\n - #hipHostRegisterDefault Memory is Mapped and Portable\n - #hipHostRegisterPortable Memory is considered registered by all contexts. HIP only supports\n one context so this is always assumed true.\n - #hipHostRegisterMapped Map the allocation into the address space for the current device.\n The device pointer can be obtained with #hipHostGetDevicePointer.\n\n\n After registering the memory, use #hipHostGetDevicePointer to obtain the mapped device pointer.\n On many systems, the mapped device pointer will have a different value than the mapped host\n pointer. Applications must use the device pointer in device code, and the host pointer in device\n code.\n\n On some systems, registered memory is pinned. On some systems, registered memory may not be\n actually be pinned but uses OS or hardware facilities to all GPU access to the host memory.\n\n Developers are strongly encouraged to register memory blocks which are aligned to the host\n cache-line size. (typically 64-bytes but can be obtains from the CPUID instruction).\n\n If registering non-aligned pointers, the application must take care when register pointers from\n the same cache line on different devices. 
HIP's coarse-grained synchronization model does not\n guarantee correct results if different devices write to different parts of the same cache block -\n typically one of the writes will \"win\" and overwrite data from the other registered memory\n region.\n\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @see hipHostUnregister, hipHostGetFlags, hipHostGetDevicePointer"] - pub fn hipHostRegister( - hostPtr: *mut ::std::os::raw::c_void, - sizeBytes: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Un-register host pointer\n\n @param[in] hostPtr Host pointer previously registered with #hipHostRegister\n @return Error code\n\n @see hipHostRegister"] - pub fn hipHostUnregister(hostPtr: *mut ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " Allocates at least width (in bytes) * height bytes of linear memory\n Padding may occur to ensure alighnment requirements are met for the given row\n The change in width size due to padding will be returned in *pitch.\n Currently the alignment is set to 128 bytes\n\n @param[out] ptr Pointer to the allocated device memory\n @param[out] pitch Pitch for allocation (in bytes)\n @param[in] width Requested pitched allocation width (in bytes)\n @param[in] height Requested pitched allocation height\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n\n @return Error code\n\n @see hipMalloc, hipFree, hipMallocArray, hipFreeArray, hipHostFree, hipMalloc3D,\n hipMalloc3DArray, hipHostMalloc"] - pub fn hipMallocPitch( - ptr: *mut *mut ::std::os::raw::c_void, - pitch: *mut usize, - width: usize, - height: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " Allocates at least width (in bytes) * height bytes of linear memory\n Padding may occur to ensure alighnment requirements are met for the given row\n The change in width size due to padding will be returned in *pitch.\n Currently the alignment is set to 128 bytes\n\n @param[out] dptr Pointer to the allocated device memory\n @param[out] pitch Pitch for allocation (in bytes)\n @param[in] widthInBytes Requested pitched allocation width (in bytes)\n @param[in] height Requested pitched allocation height\n @param[in] elementSizeBytes The size of element bytes, should be 4, 8 or 16\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n The intended usage of pitch is as a separate parameter of the allocation, used to compute addresses within the 2D array.\n Given the row and column of an array element of type T, the address is computed as:\n T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column;\n\n @return Error code\n\n @see hipMalloc, hipFree, hipMallocArray, hipFreeArray, hipHostFree, hipMalloc3D,\n hipMalloc3DArray, hipHostMalloc"] - pub fn hipMemAllocPitch( - dptr: *mut hipDeviceptr_t, - pitch: *mut usize, - widthInBytes: usize, - height: usize, - elementSizeBytes: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Free memory allocated by the hcc hip memory allocation API.\n This API performs an implicit hipDeviceSynchronize() call.\n If pointer is NULL, the hip runtime is initialized and hipSuccess is returned.\n\n @param[in] ptr Pointer to memory to be freed\n @return #hipSuccess\n @return #hipErrorInvalidDevicePointer (if pointer is invalid, including host pointers allocated\n with hipHostMalloc)\n\n @see hipMalloc, hipMallocPitch, hipMallocArray, hipFreeArray, hipHostFree, 
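The hipHostRegister/hipHostGetDevicePointer pair documented above can be exercised roughly as follows; flag value 0 corresponds to hipHostRegisterDefault ("mapped and portable" per the doc comment), and the remaining names follow the assumptions noted in the earlier sketches.

    use hip_runtime_sys::*; // crate name assumed

    unsafe fn register_and_map(
        host: *mut ::std::os::raw::c_void,
        bytes: usize,
        dev_out: *mut *mut ::std::os::raw::c_void,
    ) -> hipError_t {
        // 0 == hipHostRegisterDefault: the registered range is mapped and portable.
        let err = hipHostRegister(host, bytes, 0);
        if err != hipError_t::hipSuccess { // success constant name assumed
            return err;
        }
        // Kernels must use the device-visible alias returned here, not the host pointer.
        hipHostGetDevicePointer(dev_out, host, 0)
    }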
hipMalloc3D,\n hipMalloc3DArray, hipHostMalloc"] - pub fn hipFree(ptr: *mut ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Free memory allocated by the hcc hip host memory allocation API [Deprecated]\n\n @param[in] ptr Pointer to memory to be freed\n @return #hipSuccess,\n #hipErrorInvalidValue (if pointer is invalid, including device pointers allocated\n with hipMalloc)\n\n @warning This API is deprecated, use hipHostFree() instead"] - pub fn hipFreeHost(ptr: *mut ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Free memory allocated by the hcc hip host memory allocation API\n This API performs an implicit hipDeviceSynchronize() call.\n If pointer is NULL, the hip runtime is initialized and hipSuccess is returned.\n\n @param[in] ptr Pointer to memory to be freed\n @return #hipSuccess,\n #hipErrorInvalidValue (if pointer is invalid, including device pointers allocated with\n hipMalloc)\n\n @see hipMalloc, hipMallocPitch, hipFree, hipMallocArray, hipFreeArray, hipMalloc3D,\n hipMalloc3DArray, hipHostMalloc"] - pub fn hipHostFree(ptr: *mut ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from src to dst.\n\n It supports memory from host to device,\n device to host, device to device and host to host\n The src and dst must not overlap.\n\n For hipMemcpy, the copy is always performed by the current device (set by hipSetDevice).\n For multi-gpu or peer-to-peer configurations, it is recommended to set the current device to the\n device where the src data is physically located. For optimal peer-to-peer copies, the copy device\n must be able to access the src and dst pointers (by calling hipDeviceEnablePeerAccess with copy\n agent as the current device and src/dest as the peerDevice argument. 
if this is not done, the\n hipMemcpy will still work, but will perform the copy using a staging buffer on the host.\n Calling hipMemcpy with dst and src pointers that do not match the hipMemcpyKind results in\n undefined behavior.\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n @param[in] kind Kind of transfer\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown\n\n @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpy( - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Memory copy on the stream.\n It allows single or multiple devices to do memory copy on single or multiple streams.\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n @param[in] kind Kind of transfer\n @param[in] stream Valid stream\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown, #hipErrorContextIsDestroyed\n\n @see hipMemcpy, hipStreamCreate, hipStreamSynchronize, hipStreamDestroy, hipSetDevice, hipLaunchKernelGGL\n"] - pub fn hipMemcpyWithStream( - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from Host to Device\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpyHtoD( - dst: hipDeviceptr_t, - src: *mut ::std::os::raw::c_void, - sizeBytes: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from Device to Host\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n 
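A host-to-device/device-to-host round trip with the blocking hipMemcpy declared above. hipMalloc is not part of this hunk, so its standard (pointer, size) signature is assumed, as are the hipMemcpyKind variant paths.

    use hip_runtime_sys::*; // crate name assumed

    unsafe fn roundtrip(host_src: &[u8], host_dst: &mut [u8]) -> hipError_t {
        assert_eq!(host_src.len(), host_dst.len());
        let bytes = host_src.len();
        let mut dev: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
        let mut err = hipMalloc(&mut dev, bytes); // declared elsewhere in these bindings; signature assumed
        if err != hipError_t::hipSuccess { // success constant name assumed
            return err;
        }
        // Blocking copies performed by the current device; src and dst must not overlap.
        err = hipMemcpy(
            dev,
            host_src.as_ptr() as *const ::std::os::raw::c_void,
            bytes,
            hipMemcpyKind::hipMemcpyHostToDevice,
        );
        if err == hipError_t::hipSuccess {
            err = hipMemcpy(
                host_dst.as_mut_ptr() as *mut ::std::os::raw::c_void,
                dev,
                bytes,
                hipMemcpyKind::hipMemcpyDeviceToHost,
            );
        }
        let _ = hipFree(dev); // hipFree also does an implicit device synchronize, per its doc comment
        err
    }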
hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpyDtoH( - dst: *mut ::std::os::raw::c_void, - src: hipDeviceptr_t, - sizeBytes: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from Device to Device\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpyDtoD(dst: hipDeviceptr_t, src: hipDeviceptr_t, sizeBytes: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from Host to Device asynchronously\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n @param[in] stream Stream identifier\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpyHtoDAsync( - dst: hipDeviceptr_t, - src: *mut ::std::os::raw::c_void, - sizeBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from Device to Host asynchronously\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n @param[in] stream Stream identifier\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpyDtoHAsync( - dst: *mut ::std::os::raw::c_void, - src: hipDeviceptr_t, - sizeBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from Device to Device asynchronously\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n @param[in] stream Stream identifier\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, 
hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost,\n hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA,\n hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD,\n hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync,\n hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo,\n hipMemHostAlloc, hipMemHostGetDevicePointer"] - pub fn hipMemcpyDtoDAsync( - dst: hipDeviceptr_t, - src: hipDeviceptr_t, - sizeBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a global pointer from a module.\n Returns in *dptr and *bytes the pointer and size of the global of name name located in module hmod.\n If no variable of that name exists, it returns hipErrorNotFound. Both parameters dptr and bytes are optional.\n If one of them is NULL, it is ignored and hipSuccess is returned.\n\n @param[out] dptr Returns global device pointer\n @param[out] bytes Returns global size in bytes\n @param[in] hmod Module to retrieve global from\n @param[in] name Name of global to retrieve\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotFound, #hipErrorInvalidContext\n"] - pub fn hipModuleGetGlobal( - dptr: *mut hipDeviceptr_t, - bytes: *mut usize, - hmod: hipModule_t, - name: *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets device pointer associated with symbol on the device.\n\n @param[out] devPtr pointer to the device associated the symbole\n @param[in] symbol pointer to the symbole of the device\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetSymbolAddress( - devPtr: *mut *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the size of the given symbol on the device.\n\n @param[in] symbol pointer to the device symbole\n @param[out] size pointer to the size\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetSymbolSize(size: *mut usize, symbol: *const ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data to the given symbol on the device.\n Symbol HIP APIs allow a kernel to define a device-side data symbol which can be accessed on\n the host side. 
The symbol can be in __constant or device space.\n Note that the symbol name needs to be encased in the HIP_SYMBOL macro.\n This also applies to hipMemcpyFromSymbol, hipGetSymbolAddress, and hipGetSymbolSize.\n For detail usage, see the example at\n https://github.com/ROCm/HIP/blob/develop/docs/user_guide/hip_porting_guide.md\n\n @param[out] symbol pointer to the device symbole\n @param[in] src pointer to the source address\n @param[in] sizeBytes size in bytes to copy\n @param[in] offset offset in bytes from start of symbole\n @param[in] kind type of memory transfer\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipMemcpyToSymbol( - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data to the given symbol on the device asynchronously.\n\n @param[out] symbol pointer to the device symbole\n @param[in] src pointer to the source address\n @param[in] sizeBytes size in bytes to copy\n @param[in] offset offset in bytes from start of symbole\n @param[in] kind type of memory transfer\n @param[in] stream stream identifier\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipMemcpyToSymbolAsync( - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data from the given symbol on the device.\n\n @param[out] dst Returns pointer to destinition memory address\n @param[in] symbol Pointer to the symbole address on the device\n @param[in] sizeBytes Size in bytes to copy\n @param[in] offset Offset in bytes from the start of symbole\n @param[in] kind Type of memory transfer\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipMemcpyFromSymbol( - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data from the given symbol on the device asynchronously.\n\n @param[out] dst Returns pointer to destinition memory address\n @param[in] symbol pointer to the symbole address on the device\n @param[in] sizeBytes size in bytes to copy\n @param[in] offset offset in bytes from the start of symbole\n @param[in] kind type of memory transfer\n @param[in] stream stream identifier\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipMemcpyFromSymbolAsync( - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copy data from src to dst asynchronously.\n\n @warning If host or dest are not pinned, the memory copy will be performed synchronously. For\n best performance, use hipHostMalloc to allocate host memory that is transferred asynchronously.\n\n @warning on HCC hipMemcpyAsync does not support overlapped H2D and D2H copies.\n For hipMemcpy, the copy is always performed by the device associated with the specified stream.\n\n For multi-gpu or peer-to-peer configurations, it is recommended to use a stream which is a\n attached to the device where the src data is physically located. 
For optimal peer-to-peer copies,\n the copy device must be able to access the src and dst pointers (by calling\n hipDeviceEnablePeerAccess with copy agent as the current device and src/dest as the peerDevice\n argument. if this is not done, the hipMemcpy will still work, but will perform the copy using a\n staging buffer on the host.\n\n @param[out] dst Data being copy to\n @param[in] src Data being copy from\n @param[in] sizeBytes Data size in bytes\n @param[in] kind Type of memory transfer\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown\n\n @see hipMemcpy, hipMemcpy2D, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray,\n hipMemcpy2DFromArray, hipMemcpyArrayToArray, hipMemcpy2DArrayToArray, hipMemcpyToSymbol,\n hipMemcpyFromSymbol, hipMemcpy2DAsync, hipMemcpyToArrayAsync, hipMemcpy2DToArrayAsync,\n hipMemcpyFromArrayAsync, hipMemcpy2DFromArrayAsync, hipMemcpyToSymbolAsync,\n hipMemcpyFromSymbolAsync"] - pub fn hipMemcpyAsync( - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant\n byte value value.\n\n @param[out] dst Data being filled\n @param[in] value Value to be set\n @param[in] sizeBytes Data size in bytes\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized"] - pub fn hipMemset( - dst: *mut ::std::os::raw::c_void, - value: ::std::os::raw::c_int, - sizeBytes: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant\n byte value value.\n\n @param[out] dest Data ptr to be filled\n @param[in] value Value to be set\n @param[in] count Number of values to be set\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized"] - pub fn hipMemsetD8( - dest: hipDeviceptr_t, - value: ::std::os::raw::c_uchar, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant\n byte value value.\n\n hipMemsetD8Async() is asynchronous with respect to the host, so the call may return before the\n memset is complete. The operation can optionally be associated to a stream by passing a non-zero\n stream argument. 
If stream is non-zero, the operation may overlap with operations in other\n streams.\n\n @param[out] dest Data ptr to be filled\n @param[in] value Constant value to be set\n @param[in] count Number of values to be set\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized"] - pub fn hipMemsetD8Async( - dest: hipDeviceptr_t, - value: ::std::os::raw::c_uchar, - count: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant\n short value value.\n\n @param[out] dest Data ptr to be filled\n @param[in] value Constant value to be set\n @param[in] count Number of values to be set\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized"] - pub fn hipMemsetD16( - dest: hipDeviceptr_t, - value: ::std::os::raw::c_ushort, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant\n short value value.\n\n hipMemsetD16Async() is asynchronous with respect to the host, so the call may return before the\n memset is complete. The operation can optionally be associated to a stream by passing a non-zero\n stream argument. If stream is non-zero, the operation may overlap with operations in other\n streams.\n\n @param[out] dest Data ptr to be filled\n @param[in] value Constant value to be set\n @param[in] count Number of values to be set\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized"] - pub fn hipMemsetD16Async( - dest: hipDeviceptr_t, - value: ::std::os::raw::c_ushort, - count: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the memory area pointed to by dest with the constant integer\n value for specified number of times.\n\n @param[out] dest Data being filled\n @param[in] value Constant value to be set\n @param[in] count Number of values to be set\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized"] - pub fn hipMemsetD32( - dest: hipDeviceptr_t, - value: ::std::os::raw::c_int, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the first sizeBytes bytes of the memory area pointed to by dev with the constant\n byte value value.\n\n hipMemsetAsync() is asynchronous with respect to the host, so the call may return before the\n memset is complete. The operation can optionally be associated to a stream by passing a non-zero\n stream argument. If stream is non-zero, the operation may overlap with operations in other\n streams.\n\n @param[out] dst Pointer to device memory\n @param[in] value Value to set for each byte of specified memory\n @param[in] sizeBytes Size in bytes to set\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue"] - pub fn hipMemsetAsync( - dst: *mut ::std::os::raw::c_void, - value: ::std::os::raw::c_int, - sizeBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the memory area pointed to by dev with the constant integer\n value for specified number of times.\n\n hipMemsetD32Async() is asynchronous with respect to the host, so the call may return before the\n memset is complete. The operation can optionally be associated to a stream by passing a non-zero\n stream argument. 
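A sketch of the asynchronous memset described above, followed by a host-side wait. hipStreamSynchronize is declared elsewhere in these bindings (it is referenced by several doc comments here); its standard one-argument signature is assumed, along with the other names as before.

    use hip_runtime_sys::*; // crate name assumed

    unsafe fn zero_async(
        dev: *mut ::std::os::raw::c_void,
        bytes: usize,
        stream: hipStream_t,
    ) -> hipError_t {
        // Asynchronous with respect to the host; may overlap with work in other streams.
        let err = hipMemsetAsync(dev, 0, bytes, stream);
        if err != hipError_t::hipSuccess { // success constant name assumed
            return err;
        }
        // Block until the memset (and everything queued before it) has completed.
        hipStreamSynchronize(stream) // declared elsewhere in these bindings; signature assumed
    }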
If stream is non-zero, the operation may overlap with operations in other\n streams.\n\n @param[out] dst Pointer to device memory\n @param[in] value Value to set for each byte of specified memory\n @param[in] count Number of values to be set\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue"] - pub fn hipMemsetD32Async( - dst: hipDeviceptr_t, - value: ::std::os::raw::c_int, - count: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills the memory area pointed to by dst with the constant value.\n\n @param[out] dst Pointer to device memory\n @param[in] pitch Data size in bytes\n @param[in] value Constant value to be set\n @param[in] width\n @param[in] height\n @return #hipSuccess, #hipErrorInvalidValue"] - pub fn hipMemset2D( - dst: *mut ::std::os::raw::c_void, - pitch: usize, - value: ::std::os::raw::c_int, - width: usize, - height: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills asynchronously the memory area pointed to by dst with the constant value.\n\n @param[in] dst Pointer to 2D device memory\n @param[in] pitch Pitch size in bytes\n @param[in] value Value to be set for each byte of specified memory\n @param[in] width Width of matrix set columns in bytes\n @param[in] height Height of matrix set rows in bytes\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue"] - pub fn hipMemset2DAsync( - dst: *mut ::std::os::raw::c_void, - pitch: usize, - value: ::std::os::raw::c_int, - width: usize, - height: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills synchronously the memory area pointed to by pitchedDevPtr with the constant value.\n\n @param[in] pitchedDevPtr Pointer to pitched device memory\n @param[in] value Value to set for each byte of specified memory\n @param[in] extent Size parameters for width field in bytes in device memory\n @return #hipSuccess, #hipErrorInvalidValue"] - pub fn hipMemset3D( - pitchedDevPtr: hipPitchedPtr, - value: ::std::os::raw::c_int, - extent: hipExtent, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Fills asynchronously the memory area pointed to by pitchedDevPtr with the constant value.\n\n @param[in] pitchedDevPtr Pointer to pitched device memory\n @param[in] value Value to set for each byte of specified memory\n @param[in] extent Size parameters for width field in bytes in device memory\n @param[in] stream Stream identifier\n @return #hipSuccess, #hipErrorInvalidValue"] - pub fn hipMemset3DAsync( - pitchedDevPtr: hipPitchedPtr, - value: ::std::os::raw::c_int, - extent: hipExtent, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query memory info.\n\n On ROCM, this function gets the actual free memory left on the current device, so supports\n the cases while running multi-workload (such as multiple processes, multiple threads, and\n multiple GPUs).\n\n @warning On Windows, the free memory only accounts for memory allocated by this process and may\n be optimistic.\n\n @param[out] free Returns free memory on the current device in bytes\n @param[out] total Returns total allocatable memory on the current device in bytes\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n"] - pub fn hipMemGetInfo(free: *mut usize, total: *mut usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get allocated memory size via memory pointer.\n\n This function gets the allocated 
shared virtual memory size from memory pointer.\n\n @param[in] ptr Pointer to allocated memory\n @param[out] size Returns the allocated memory size in bytes\n\n @return #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipMemPtrGetInfo(ptr: *mut ::std::os::raw::c_void, size: *mut usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate an array on the device.\n\n @param[out] array Pointer to allocated array in device memory\n @param[in] desc Requested channel format\n @param[in] width Requested array allocation width\n @param[in] height Requested array allocation height\n @param[in] flags Requested properties of allocated array\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @see hipMalloc, hipMallocPitch, hipFree, hipFreeArray, hipHostMalloc, hipHostFree"] - pub fn hipMallocArray( - array: *mut hipArray_t, - desc: *const hipChannelFormatDesc, - width: usize, - height: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create an array memory pointer on the device.\n\n @param[out] pHandle Pointer to the array memory\n @param[in] pAllocateArray Requested array desciptor\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipMallocArray, hipArrayDestroy, hipFreeArray"] - pub fn hipArrayCreate( - pHandle: *mut hipArray_t, - pAllocateArray: *const HIP_ARRAY_DESCRIPTOR, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroy an array memory pointer on the device.\n\n @param[in] array Pointer to the array memory\n\n @return #hipSuccess, #hipErrorInvalidValue\n\n @see hipArrayCreate, hipArrayDestroy, hipFreeArray"] - pub fn hipArrayDestroy(array: hipArray_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create a 3D array memory pointer on the device.\n\n @param[out] array Pointer to the 3D array memory\n @param[in] pAllocateArray Requested array desciptor\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipMallocArray, hipArrayDestroy, hipFreeArray"] - pub fn hipArray3DCreate( - array: *mut hipArray_t, - pAllocateArray: *const HIP_ARRAY3D_DESCRIPTOR, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create a 3D memory pointer on the device.\n\n @param[out] pitchedDevPtr Pointer to the 3D memory\n @param[in] extent Requested extent\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @see hipMallocPitch, hipMemGetInfo, hipFree"] - pub fn hipMalloc3D(pitchedDevPtr: *mut hipPitchedPtr, extent: hipExtent) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Frees an array on the device.\n\n @param[in] array Pointer to array to free\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized\n\n @see hipMalloc, hipMallocPitch, hipFree, hipMallocArray, hipHostMalloc, hipHostFree"] - pub fn hipFreeArray(array: hipArray_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate an array on the device.\n\n @param[out] array Pointer to allocated array in device memory\n @param[in] desc Requested channel format\n @param[in] extent Requested array allocation width, height and depth\n @param[in] flags Requested properties of allocated array\n @return #hipSuccess, #hipErrorOutOfMemory\n\n @see hipMalloc, hipMallocPitch, hipFree, hipFreeArray, hipHostMalloc, hipHostFree"] - pub fn hipMalloc3DArray( - array: *mut hipArray_t, - desc: *const hipChannelFormatDesc, - extent: hipExtent, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - 
#[must_use] - #[doc = " @brief Gets info about the specified array\n\n @param[out] desc - Returned array type\n @param[out] extent - Returned array shape. 2D arrays will have depth of zero\n @param[out] flags - Returned array flags\n @param[in] array - The HIP array to get info for\n\n @return #hipSuccess, #hipErrorInvalidValue #hipErrorInvalidHandle\n\n @see hipArrayGetDescriptor, hipArray3DGetDescriptor"] - pub fn hipArrayGetInfo( - desc: *mut hipChannelFormatDesc, - extent: *mut hipExtent, - flags: *mut ::std::os::raw::c_uint, - array: hipArray_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a 1D or 2D array descriptor\n\n @param[out] pArrayDescriptor - Returned array descriptor\n @param[in] array - Array to get descriptor of\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue #hipErrorInvalidHandle\n\n @see hipArray3DCreate, hipArray3DGetDescriptor, hipArrayCreate, hipArrayDestroy, hipMemAlloc,\n hipMemAllocHost, hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned,\n hipMemcpy3D, hipMemcpy3DAsync, hipMemcpyAtoA, hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync,\n hipMemcpyDtoA, hipMemcpyDtoD, hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync,\n hipMemcpyHtoA, hipMemcpyHtoAAsync, hipMemcpyHtoD, hipMemcpyHtoDAsync, hipMemFree,\n hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, hipMemHostAlloc,\n hipMemHostGetDevicePointer, hipMemsetD8, hipMemsetD16, hipMemsetD32, hipArrayGetInfo"] - pub fn hipArrayGetDescriptor( - pArrayDescriptor: *mut HIP_ARRAY_DESCRIPTOR, - array: hipArray_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a 3D array descriptor\n\n @param[out] pArrayDescriptor - Returned 3D array descriptor\n @param[in] array - 3D array to get descriptor of\n\n @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidValue #hipErrorInvalidHandle, #hipErrorContextIsDestroyed\n\n @see hipArray3DCreate, hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc,\n hipMemAllocHost, hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned,\n hipMemcpy3D, hipMemcpy3DAsync, hipMemcpyAtoA, hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync,\n hipMemcpyDtoA, hipMemcpyDtoD, hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync,\n hipMemcpyHtoA, hipMemcpyHtoAAsync, hipMemcpyHtoD, hipMemcpyHtoDAsync, hipMemFree,\n hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, hipMemHostAlloc,\n hipMemHostGetDevicePointer, hipMemsetD8, hipMemsetD16, hipMemsetD32, hipArrayGetInfo"] - pub fn hipArray3DGetDescriptor( - pArrayDescriptor: *mut HIP_ARRAY3D_DESCRIPTOR, - array: hipArray_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] dpitch Pitch of destination memory\n @param[in] src Source memory address\n @param[in] spitch Pitch of source memory\n @param[in] width Width of matrix transfer (columns in bytes)\n @param[in] height Height of matrix transfer (rows)\n @param[in] kind Type of transfer\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy2D( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: *const ::std::os::raw::c_void, - spitch: 
usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies memory for 2D arrays.\n @param[in] pCopy Parameters for the memory copy\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2D, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray,\n hipMemcpyToSymbol, hipMemcpyAsync"] - pub fn hipMemcpyParam2D(pCopy: *const hip_Memcpy2D) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies memory for 2D arrays.\n @param[in] pCopy Parameters for the memory copy\n @param[in] stream Stream to use\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2D, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray,\n hipMemcpyToSymbol, hipMemcpyAsync"] - pub fn hipMemcpyParam2DAsync(pCopy: *const hip_Memcpy2D, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] dpitch Pitch of destination memory\n @param[in] src Source memory address\n @param[in] spitch Pitch of source memory\n @param[in] width Width of matrix transfer (columns in bytes)\n @param[in] height Height of matrix transfer (rows)\n @param[in] kind Type of transfer\n @param[in] stream Stream to use\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy2DAsync( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] wOffset Destination starting X offset\n @param[in] hOffset Destination starting Y offset\n @param[in] src Source memory address\n @param[in] spitch Pitch of source memory\n @param[in] width Width of matrix transfer (columns in bytes)\n @param[in] height Height of matrix transfer (rows)\n @param[in] kind Type of transfer\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpyToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy2DToArray( - dst: hipArray_t, - wOffset: usize, - hOffset: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] wOffset Destination starting X offset\n @param[in] hOffset Destination starting Y offset\n @param[in] src Source memory address\n @param[in] spitch Pitch of source memory\n @param[in] width Width of matrix transfer (columns in bytes)\n @param[in] height Height of matrix transfer (rows)\n @param[in] kind Type of transfer\n @param[in] stream Accelerator view which the copy is being enqueued\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n 
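Finally, a pitched 2D upload combining hipMallocPitch and hipMemcpy2D from the declarations above; the row addressing follows the BaseAddress + Row * Pitch + Column formula quoted in the hipMemAllocPitch doc comment, with the usual name assumptions.

    use hip_runtime_sys::*; // crate name assumed

    unsafe fn upload_2d(
        host_rows: *const ::std::os::raw::c_void, // tightly packed host rows, pitch == width_bytes
        width_bytes: usize,
        height: usize,
    ) -> hipError_t {
        let mut dev: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
        let mut pitch: usize = 0;
        // Rows are padded so that `pitch >= width_bytes` and each row starts aligned.
        let mut err = hipMallocPitch(&mut dev, &mut pitch, width_bytes, height);
        if err != hipError_t::hipSuccess { // success constant name assumed
            return err;
        }
        // Destination uses the returned pitch; the packed host source uses width_bytes as its pitch.
        err = hipMemcpy2D(
            dev,
            pitch,
            host_rows,
            width_bytes,
            width_bytes,
            height,
            hipMemcpyKind::hipMemcpyHostToDevice,
        );
        let _ = hipFree(dev);
        err
    }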
#hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpyToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy2DToArrayAsync( - dst: hipArray_t, - wOffset: usize, - hOffset: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] wOffset Destination starting X offset\n @param[in] hOffset Destination starting Y offset\n @param[in] src Source memory address\n @param[in] count size in bytes to copy\n @param[in] kind Type of transfer\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync\n @warning This API is deprecated."] - pub fn hipMemcpyToArray( - dst: hipArray_t, - wOffset: usize, - hOffset: usize, - src: *const ::std::os::raw::c_void, - count: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] srcArray Source memory address\n @param[in] wOffset Source starting X offset\n @param[in] hOffset Source starting Y offset\n @param[in] count Size in bytes to copy\n @param[in] kind Type of transfer\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync\n @warning This API is deprecated."] - pub fn hipMemcpyFromArray( - dst: *mut ::std::os::raw::c_void, - srcArray: hipArray_const_t, - wOffset: usize, - hOffset: usize, - count: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] dpitch Pitch of destination memory\n @param[in] src Source memory address\n @param[in] wOffset Source starting X offset\n @param[in] hOffset Source starting Y offset\n @param[in] width Width of matrix transfer (columns in bytes)\n @param[in] height Height of matrix transfer (rows)\n @param[in] kind Type of transfer\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy2DFromArray( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: hipArray_const_t, - wOffset: usize, - hOffset: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device asynchronously.\n\n @param[in] dst Destination memory address\n @param[in] dpitch Pitch of destination memory\n @param[in] src Source memory address\n @param[in] wOffset Source starting X offset\n @param[in] hOffset Source starting Y offset\n @param[in] width Width of matrix transfer (columns in bytes)\n @param[in] height Height of matrix transfer (rows)\n @param[in] kind Type of transfer\n @param[in] stream Accelerator view which the copy is being enqueued\n @return #hipSuccess, 
#hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy2DFromArrayAsync( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: hipArray_const_t, - wOffset: usize, - hOffset: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dst Destination memory address\n @param[in] srcArray Source array\n @param[in] srcOffset Offset in bytes of source array\n @param[in] count Size of memory copy in bytes\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpyAtoH( - dst: *mut ::std::os::raw::c_void, - srcArray: hipArray_t, - srcOffset: usize, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] dstArray Destination memory address\n @param[in] dstOffset Offset in bytes of destination array\n @param[in] srcHost Source host pointer\n @param[in] count Size of memory copy in bytes\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpyHtoA( - dstArray: hipArray_t, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] p 3D memory copy parameters\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy3D(p: *const hipMemcpy3DParms) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device asynchronously.\n\n @param[in] p 3D memory copy parameters\n @param[in] stream Stream to use\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipMemcpy3DAsync(p: *const hipMemcpy3DParms, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device.\n\n @param[in] pCopy 3D memory copy parameters\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipDrvMemcpy3D(pCopy: *const HIP_MEMCPY3D) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies data between host and device asynchronously.\n\n @param[in] pCopy 3D memory copy parameters\n @param[in] stream Stream to use\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue,\n #hipErrorInvalidDevicePointer, 
#hipErrorInvalidMemcpyDirection\n\n @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol,\n hipMemcpyAsync"] - pub fn hipDrvMemcpy3DAsync(pCopy: *const HIP_MEMCPY3D, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup PeerToPeer PeerToPeer Device Memory Access\n @{\n @warning PeerToPeer support is experimental.\n This section describes the PeerToPeer device memory access functions of HIP runtime API.\n/\n/**\n @brief Determine if a device can access a peer's memory.\n\n @param [out] canAccessPeer Returns the peer access capability (0 or 1)\n @param [in] deviceId - device from where memory may be accessed.\n @param [in] peerDeviceId - device where memory is physically located\n\n Returns \"1\" in @p canAccessPeer if the specified @p device is capable\n of directly accessing memory physically located on peerDevice , or \"0\" if not.\n\n Returns \"0\" in @p canAccessPeer if deviceId == peerDeviceId, and both are valid devices : a\n device is not a peer of itself.\n\n @returns #hipSuccess,\n @returns #hipErrorInvalidDevice if deviceId or peerDeviceId are not valid devices"] - pub fn hipDeviceCanAccessPeer( - canAccessPeer: *mut ::std::os::raw::c_int, - deviceId: ::std::os::raw::c_int, - peerDeviceId: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enable direct access from current device's virtual address space to memory allocations\n physically located on a peer device.\n\n Memory which already allocated on peer device will be mapped into the address space of the\n current device. In addition, all future memory allocations on peerDeviceId will be mapped into\n the address space of the current device when the memory is allocated. 
The peer memory remains\n accessible from the current device until a call to hipDeviceDisablePeerAccess or hipDeviceReset.\n\n\n @param [in] peerDeviceId Peer device to enable direct access to from the current device\n @param [in] flags Reserved for future use, must be zero\n\n Returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue,\n @returns #hipErrorPeerAccessAlreadyEnabled if peer access is already enabled for this device."] - pub fn hipDeviceEnablePeerAccess( - peerDeviceId: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Disable direct access from current device's virtual address space to memory allocations\n physically located on a peer device.\n\n Returns hipErrorPeerAccessNotEnabled if direct access to memory on peerDevice has not yet been\n enabled from the current device.\n\n @param [in] peerDeviceId Peer device to disable direct access to\n\n @returns #hipSuccess, #hipErrorPeerAccessNotEnabled"] - pub fn hipDeviceDisablePeerAccess(peerDeviceId: ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get information on memory allocations.\n\n @param [out] pbase - BAse pointer address\n @param [out] psize - Size of allocation\n @param [in] dptr- Device Pointer\n\n @returns #hipSuccess, #hipErrorInvalidDevicePointer\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice"] - pub fn hipMemGetAddressRange( - pbase: *mut hipDeviceptr_t, - psize: *mut usize, - dptr: hipDeviceptr_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies memory from one device to memory on another device.\n\n @param [out] dst - Destination device pointer.\n @param [in] dstDeviceId - Destination device\n @param [in] src - Source device pointer\n @param [in] srcDeviceId - Source device\n @param [in] sizeBytes - Size of memory copy in bytes\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDevice"] - pub fn hipMemcpyPeer( - dst: *mut ::std::os::raw::c_void, - dstDeviceId: ::std::os::raw::c_int, - src: *const ::std::os::raw::c_void, - srcDeviceId: ::std::os::raw::c_int, - sizeBytes: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies memory from one device to memory on another device.\n\n @param [out] dst - Destination device pointer.\n @param [in] dstDeviceId - Destination device\n @param [in] src - Source device pointer\n @param [in] srcDevice - Source device\n @param [in] sizeBytes - Size of memory copy in bytes\n @param [in] stream - Stream identifier\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDevice"] - pub fn hipMemcpyPeerAsync( - dst: *mut ::std::os::raw::c_void, - dstDeviceId: ::std::os::raw::c_int, - src: *const ::std::os::raw::c_void, - srcDevice: ::std::os::raw::c_int, - sizeBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create a context and set it as current/default context\n\n @param [out] ctx Context to create\n @param [in] flags Context creation flags\n @param [in] device device handle\n\n @return #hipSuccess\n\n @see hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, hipCtxPushCurrent,\n hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform.\n"] - pub fn 
hipCtxCreate( - ctx: *mut hipCtx_t, - flags: ::std::os::raw::c_uint, - device: hipDevice_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroy a HIP context.\n\n @param [in] ctx Context to destroy\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @see hipCtxCreate, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,hipCtxSetCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize , hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxDestroy(ctx: hipCtx_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Pop the current/default context and return the popped context.\n\n @param [out] ctx The current context to pop\n\n @returns #hipSuccess, #hipErrorInvalidContext\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxSetCurrent, hipCtxGetCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxPopCurrent(ctx: *mut hipCtx_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Push the context to be set as current/ default context\n\n @param [in] ctx The current context to push\n\n @returns #hipSuccess, #hipErrorInvalidContext\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize , hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxPushCurrent(ctx: hipCtx_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set the passed context as current/default\n\n @param [in] ctx The context to set as current\n\n @returns #hipSuccess, #hipErrorInvalidContext\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize , hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxSetCurrent(ctx: hipCtx_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the handle of the current/ default context\n\n @param [out] ctx The context to get as current\n\n @returns #hipSuccess, #hipErrorInvalidContext\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetDevice, hipCtxGetFlags, hipCtxPopCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxGetCurrent(ctx: *mut hipCtx_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the handle of the device associated with current/default context\n\n @param [out] device The device from the current context\n\n @returns #hipSuccess, #hipErrorInvalidContext\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxGetDevice(device: *mut hipDevice_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the approximate HIP api version.\n\n @param [in] ctx Context to check\n @param [out] apiVersion API version to 
get\n\n @return #hipSuccess\n\n @warning The HIP feature set does not correspond to an exact CUDA SDK api revision.\n This function always set *apiVersion to 4 as an approximation though HIP supports\n some features which were introduced in later CUDA SDK revisions.\n HIP apps code should not rely on the api revision number here and should\n use arch feature flags to test device capabilities or conditional compilation.\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetDevice, hipCtxGetFlags, hipCtxPopCurrent,\n hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxGetApiVersion(ctx: hipCtx_t, apiVersion: *mut ::std::os::raw::c_int) - -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get Cache configuration for a specific function\n\n @param [out] cacheConfig Cache configuration\n\n @return #hipSuccess\n\n @warning AMD devices and some Nvidia GPUS do not support reconfigurable cache. This hint is\n ignored on those architectures.\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxGetCacheConfig(cacheConfig: *mut hipFuncCache_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set L1/Shared cache partition.\n\n @param [in] cacheConfig Cache configuration to set\n\n @return #hipSuccess\n\n @warning AMD devices and some Nvidia GPUS do not support reconfigurable cache. This hint is\n ignored on those architectures.\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxSetCacheConfig(cacheConfig: hipFuncCache_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set Shared memory bank configuration.\n\n @param [in] config Shared memory configuration to set\n\n @return #hipSuccess\n\n @warning AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxSetSharedMemConfig(config: hipSharedMemConfig) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get Shared memory bank configuration.\n\n @param [out] pConfig Pointer of shared memory configuration\n\n @return #hipSuccess\n\n @warning AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxGetSharedMemConfig(pConfig: *mut hipSharedMemConfig) -> 
hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Blocks until the default context has completed all preceding requested tasks.\n\n @return #hipSuccess\n\n @warning This function waits for all streams on the default context to complete execution, and\n then returns.\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxSynchronize() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Return flags used for creating default context.\n\n @param [out] flags Pointer of flags\n\n @returns #hipSuccess\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxPopCurrent, hipCtxGetCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxGetFlags(flags: *mut ::std::os::raw::c_uint) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enables direct access to memory allocations in a peer context.\n\n Memory which already allocated on peer device will be mapped into the address space of the\n current device. In addition, all future memory allocations on peerDeviceId will be mapped into\n the address space of the current device when the memory is allocated. The peer memory remains\n accessible from the current device until a call to hipDeviceDisablePeerAccess or hipDeviceReset.\n\n\n @param [in] peerCtx Peer context\n @param [in] flags flags, need to set as 0\n\n @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue,\n #hipErrorPeerAccessAlreadyEnabled\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n @warning PeerToPeer support is experimental.\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxEnablePeerAccess(peerCtx: hipCtx_t, flags: ::std::os::raw::c_uint) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Disable direct access from current context's virtual address space to memory allocations\n physically located on a peer context.Disables direct access to memory allocations in a peer\n context and unregisters any registered allocations.\n\n Returns #hipErrorPeerAccessNotEnabled if direct access to memory on peerDevice has not yet been\n enabled from the current device.\n\n @param [in] peerCtx Peer context to be disabled\n\n @returns #hipSuccess, #hipErrorPeerAccessNotEnabled\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n @warning PeerToPeer support is experimental.\n\n @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the\n NVIDIA platform."] - pub fn hipCtxDisablePeerAccess(peerCtx: hipCtx_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the state of the primary context.\n\n @param [in] dev Device to get primary context flags for\n @param [out] flags Pointer to store flags\n @param [out] active Pointer to store context state; 0 = inactive, 1 = active\n\n @returns 
#hipSuccess\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent driver API on the\n NVIDIA platform."] - pub fn hipDevicePrimaryCtxGetState( - dev: hipDevice_t, - flags: *mut ::std::os::raw::c_uint, - active: *mut ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Release the primary context on the GPU.\n\n @param [in] dev Device which primary context is released\n\n @returns #hipSuccess\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n @warning This function return #hipSuccess though doesn't release the primaryCtx by design on\n HIP/HCC path.\n\n @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA\n platform."] - pub fn hipDevicePrimaryCtxRelease(dev: hipDevice_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Retain the primary context on the GPU.\n\n @param [out] pctx Returned context handle of the new context\n @param [in] dev Device which primary context is released\n\n @returns #hipSuccess\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA\n platform."] - pub fn hipDevicePrimaryCtxRetain(pctx: *mut hipCtx_t, dev: hipDevice_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Resets the primary context on the GPU.\n\n @param [in] dev Device which primary context is reset\n\n @returns #hipSuccess\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA\n platform."] - pub fn hipDevicePrimaryCtxReset(dev: hipDevice_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set flags for the primary context.\n\n @param [in] dev Device for which the primary context flags are set\n @param [in] flags New flags for the device\n\n @returns #hipSuccess, #hipErrorContextAlreadyInUse\n\n @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,\n hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice\n\n @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA\n platform."] - pub fn hipDevicePrimaryCtxSetFlags( - dev: hipDevice_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n\n @defgroup Module Module Management\n @{\n @ingroup API\n This section describes the module management functions of HIP runtime API.\n\n/\n/**\n @brief Loads code object from file into a module the currrent context.\n\n @param [in] fname Filename of code object to load\n\n @param [out] module Module\n\n @warning File/memory resources 
allocated in this function are released only in hipModuleUnload.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext, #hipErrorFileNotFound,\n #hipErrorOutOfMemory, #hipErrorSharedObjectInitFailed, #hipErrorNotInitialized\n"] - pub fn hipModuleLoad( - module: *mut hipModule_t, - fname: *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Frees the module\n\n @param [in] module Module to free\n\n @returns #hipSuccess, #hipErrorInvalidResourceHandle\n\n The module is freed, and the code objects associated with it are destroyed."] - pub fn hipModuleUnload(module: hipModule_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Function with kname will be extracted if present in module\n\n @param [in] module Module to get function from\n @param [in] kname Pointer to the name of function\n @param [out] function Pointer to function handle\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext, #hipErrorNotInitialized,\n #hipErrorNotFound,"] - pub fn hipModuleGetFunction( - function: *mut hipFunction_t, - module: hipModule_t, - kname: *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Find out attributes for a given function.\n\n @param [out] attr Attributes of funtion\n @param [in] func Pointer to the function handle\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDeviceFunction"] - pub fn hipFuncGetAttributes( - attr: *mut hipFuncAttributes, - func: *const ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Find out a specific attribute for a given function.\n\n @param [out] value Pointer to the value\n @param [in] attrib Attributes of the given funtion\n @param [in] hfunc Function to get attributes from\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDeviceFunction"] - pub fn hipFuncGetAttribute( - value: *mut ::std::os::raw::c_int, - attrib: hipFunction_attribute, - hfunc: hipFunction_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief returns the handle of the texture reference with the name from the module.\n\n @param [in] hmod Module\n @param [in] name Pointer of name of texture reference\n @param [out] texRef Pointer of texture reference\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorNotFound, #hipErrorInvalidValue"] - pub fn hipModuleGetTexRef( - texRef: *mut *mut textureReference, - hmod: hipModule_t, - name: *const ::std::os::raw::c_char, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief builds module from code object which resides in host memory. Image is pointer to that\n location.\n\n @param [in] image The pointer to the location of data\n @param [out] module Retuned module\n\n @returns hipSuccess, hipErrorNotInitialized, hipErrorOutOfMemory, hipErrorNotInitialized"] - pub fn hipModuleLoadData( - module: *mut hipModule_t, - image: *const ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief builds module from code object which resides in host memory. Image is pointer to that\n location. Options are not used. 
hipModuleLoadData is called.\n\n @param [in] image The pointer to the location of data\n @param [out] module Retuned module\n @param [in] numOptions Number of options\n @param [in] options Options for JIT\n @param [in] optionValues Option values for JIT\n\n @returns hipSuccess, hipErrorNotInitialized, hipErrorOutOfMemory, hipErrorNotInitialized"] - pub fn hipModuleLoadDataEx( - module: *mut hipModule_t, - image: *const ::std::os::raw::c_void, - numOptions: ::std::os::raw::c_uint, - options: *mut hipJitOption, - optionValues: *mut *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief launches kernel f with launch parameters and shared memory on stream with arguments passed\n to kernelparams or extra\n\n @param [in] f Kernel to launch.\n @param [in] gridDimX X grid dimension specified as multiple of blockDimX.\n @param [in] gridDimY Y grid dimension specified as multiple of blockDimY.\n @param [in] gridDimZ Z grid dimension specified as multiple of blockDimZ.\n @param [in] blockDimX X block dimensions specified in work-items\n @param [in] blockDimY Y grid dimension specified in work-items\n @param [in] blockDimZ Z grid dimension specified in work-items\n @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. The\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream Stream where the kernel should be dispatched. May be 0, in which case th\n default stream is used with associated synchronization rules.\n @param [in] kernelParams Kernel parameters to launch\n @param [in] extra Pointer to kernel arguments. These are passed directly to the kernel and\n must be in the memory layout and alignment expected by the kernel.\n All passed arguments must be naturally aligned according to their type. The memory address of each\n argument should be a multiple of its size in bytes. Please refer to hip_porting_driver_api.md\n for sample usage.\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32. So gridDim.x * blockDim.x, gridDim.y * blockDim.y\n and gridDim.z * blockDim.z are always less than 2^32.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue"] - pub fn hipModuleLaunchKernel( - f: hipFunction_t, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - stream: hipStream_t, - kernelParams: *mut *mut ::std::os::raw::c_void, - extra: *mut *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief launches kernel f with launch parameters and shared memory on stream with arguments passed\n to kernelParams, where thread blocks can cooperate and synchronize as they execute\n\n @param [in] f Kernel to launch.\n @param [in] gridDimX X grid dimension specified as multiple of blockDimX.\n @param [in] gridDimY Y grid dimension specified as multiple of blockDimY.\n @param [in] gridDimZ Z grid dimension specified as multiple of blockDimZ.\n @param [in] blockDimX X block dimension specified in work-items.\n @param [in] blockDimY Y block dimension specified in work-items.\n @param [in] blockDimZ Z block dimension specified in work-items.\n @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. 
The\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream Stream where the kernel should be dispatched. May be 0,\n in which case the default stream is used with associated synchronization rules.\n @param [in] kernelParams A list of kernel arguments.\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidHandle, #hipErrorInvalidImage, #hipErrorInvalidValue,\n #hipErrorInvalidConfiguration, #hipErrorLaunchFailure, #hipErrorLaunchOutOfResources,\n #hipErrorLaunchTimeOut, #hipErrorCooperativeLaunchTooLarge, #hipErrorSharedObjectInitFailed"] - pub fn hipModuleLaunchCooperativeKernel( - f: hipFunction_t, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - stream: hipStream_t, - kernelParams: *mut *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Launches kernels on multiple devices where thread blocks can cooperate and\n synchronize as they execute.\n\n @param [in] launchParamsList List of launch parameters, one per device.\n @param [in] numDevices Size of the launchParamsList array.\n @param [in] flags Flags to control launch behavior.\n\n @returns #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext,\n #hipErrorInvalidHandle, #hipErrorInvalidImage, #hipErrorInvalidValue,\n #hipErrorInvalidConfiguration, #hipErrorInvalidResourceHandle, #hipErrorLaunchFailure,\n #hipErrorLaunchOutOfResources, #hipErrorLaunchTimeOut, #hipErrorCooperativeLaunchTooLarge,\n #hipErrorSharedObjectInitFailed"] - pub fn hipModuleLaunchCooperativeKernelMultiDevice( - launchParamsList: *mut hipFunctionLaunchParams, - numDevices: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief launches kernel f with launch parameters and shared memory on stream with arguments passed\n to kernelparams or extra, where thread blocks can cooperate and synchronize as they execute\n\n @param [in] f Kernel to launch.\n @param [in] gridDim Grid dimensions specified as multiple of blockDim.\n @param [in] blockDimX Block dimensions specified in work-items\n @param [in] kernelParams A list of kernel arguments\n @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. The\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream Stream where the kernel should be dispatched. 
May be 0, in which case th\n default stream is used with associated synchronization rules.\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue, #hipErrorCooperativeLaunchTooLarge"] - pub fn hipLaunchCooperativeKernel( - f: *const ::std::os::raw::c_void, - gridDim: dim3, - blockDimX: dim3, - kernelParams: *mut *mut ::std::os::raw::c_void, - sharedMemBytes: ::std::os::raw::c_uint, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Launches kernels on multiple devices where thread blocks can cooperate and\n synchronize as they execute.\n\n @param [in] launchParamsList List of launch parameters, one per device.\n @param [in] numDevices Size of the launchParamsList array.\n @param [in] flags Flags to control launch behavior.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue,\n #hipErrorCooperativeLaunchTooLarge"] - pub fn hipLaunchCooperativeKernelMultiDevice( - launchParamsList: *mut hipLaunchParams, - numDevices: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Launches kernels on multiple devices and guarantees all specified kernels are dispatched\n on respective streams before enqueuing any other work on the specified streams from any other threads\n\n\n @param [in] launchParamsList List of launch parameters, one per device.\n @param [in] numDevices Size of the launchParamsList array.\n @param [in] flags Flags to control launch behavior.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue"] - pub fn hipExtLaunchMultiKernelMultiDevice( - launchParamsList: *mut hipLaunchParams, - numDevices: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = "-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Occupancy Occupancy\n @{\n This section describes the occupancy functions of HIP runtime API.\n\n/\n/**\n @brief determine the grid and block sizes to achieves maximum occupancy for a kernel\n\n @param [out] gridSize minimum grid size for maximum potential occupancy\n @param [out] blockSize block size for maximum potential occupancy\n @param [in] f kernel function for which occupancy is calulated\n @param [in] dynSharedMemPerBlk dynamic shared memory usage (in bytes) intended for each block\n @param [in] blockSizeLimit the maximum block size for the kernel, use 0 for no limit\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipModuleOccupancyMaxPotentialBlockSize( - gridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - f: hipFunction_t, - dynSharedMemPerBlk: usize, - blockSizeLimit: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief determine the grid and block sizes to achieves maximum occupancy for a kernel\n\n @param [out] gridSize minimum grid size for maximum potential occupancy\n @param [out] blockSize block size for maximum potential occupancy\n @param [in] f kernel function for which occupancy is calulated\n @param [in] dynSharedMemPerBlk dynamic shared 
memory usage (in bytes) intended for each block\n @param [in] blockSizeLimit the maximum block size for the kernel, use 0 for no limit\n @param [in] flags Extra flags for occupancy calculation (only default supported)\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipModuleOccupancyMaxPotentialBlockSizeWithFlags( - gridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - f: hipFunction_t, - dynSharedMemPerBlk: usize, - blockSizeLimit: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns occupancy for a device function.\n\n @param [out] numBlocks Returned occupancy\n @param [in] f Kernel function (hipFunction) for which occupancy is calulated\n @param [in] blockSize Block size the kernel is intended to be launched with\n @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipModuleOccupancyMaxActiveBlocksPerMultiprocessor( - numBlocks: *mut ::std::os::raw::c_int, - f: hipFunction_t, - blockSize: ::std::os::raw::c_int, - dynSharedMemPerBlk: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns occupancy for a device function.\n\n @param [out] numBlocks Returned occupancy\n @param [in] f Kernel function(hipFunction_t) for which occupancy is calulated\n @param [in] blockSize Block size the kernel is intended to be launched with\n @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block\n @param [in] flags Extra flags for occupancy calculation (only default supported)\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( - numBlocks: *mut ::std::os::raw::c_int, - f: hipFunction_t, - blockSize: ::std::os::raw::c_int, - dynSharedMemPerBlk: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns occupancy for a device function.\n\n @param [out] numBlocks Returned occupancy\n @param [in] f Kernel function for which occupancy is calulated\n @param [in] blockSize Block size the kernel is intended to be launched with\n @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block\n @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue"] - pub fn hipOccupancyMaxActiveBlocksPerMultiprocessor( - numBlocks: *mut ::std::os::raw::c_int, - f: *const ::std::os::raw::c_void, - blockSize: ::std::os::raw::c_int, - dynSharedMemPerBlk: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns occupancy for a device function.\n\n @param [out] numBlocks Returned occupancy\n @param [in] f Kernel function for which occupancy is calulated\n @param [in] blockSize Block size the kernel is intended to be launched with\n @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block\n @param [in] flags Extra flags for occupancy calculation (currently ignored)\n @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue"] - pub fn hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( - numBlocks: *mut ::std::os::raw::c_int, - f: *const ::std::os::raw::c_void, - blockSize: ::std::os::raw::c_int, - dynSharedMemPerBlk: usize, - flags: ::std::os::raw::c_uint, - ) -> 
hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief determine the grid and block sizes to achieves maximum occupancy for a kernel\n\n @param [out] gridSize minimum grid size for maximum potential occupancy\n @param [out] blockSize block size for maximum potential occupancy\n @param [in] f kernel function for which occupancy is calulated\n @param [in] dynSharedMemPerBlk dynamic shared memory usage (in bytes) intended for each block\n @param [in] blockSizeLimit the maximum block size for the kernel, use 0 for no limit\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipOccupancyMaxPotentialBlockSize( - gridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - f: *const ::std::os::raw::c_void, - dynSharedMemPerBlk: usize, - blockSizeLimit: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Start recording of profiling information\n When using this API, start the profiler with profiling disabled. (--startdisabled)\n @returns #hipErrorNotSupported\n @warning : hipProfilerStart API is deprecated, use roctracer/rocTX instead."] - pub fn hipProfilerStart() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Stop recording of profiling information.\n When using this API, start the profiler with profiling disabled. (--startdisabled)\n @returns #hipErrorNotSupported\n @warning hipProfilerStart API is deprecated, use roctracer/rocTX instead."] - pub fn hipProfilerStop() -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Clang Launch API to support the triple-chevron syntax\n @{\n This section describes the API to support the triple-chevron syntax.\n/\n/**\n @brief Configure a kernel launch.\n\n @param [in] gridDim grid dimension specified as multiple of blockDim.\n @param [in] blockDim block dimensions specified in work-items\n @param [in] sharedMem Amount of dynamic shared memory to allocate for this kernel. The\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream Stream where the kernel should be dispatched. 
May be 0, in which case the\n default stream is used with associated synchronization rules.\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue\n"] - pub fn hipConfigureCall( - gridDim: dim3, - blockDim: dim3, - sharedMem: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set a kernel argument.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue\n\n @param [in] arg Pointer the argument in host memory.\n @param [in] size Size of the argument.\n @param [in] offset Offset of the argument on the argument stack.\n"] - pub fn hipSetupArgument( - arg: *const ::std::os::raw::c_void, - size: usize, - offset: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Launch a kernel.\n\n @param [in] func Kernel to launch.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue\n"] - pub fn hipLaunchByPtr(func: *const ::std::os::raw::c_void) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief C compliant kernel launch API\n\n @param [in] function_address - kernel stub function pointer.\n @param [in] numBlocks - number of blocks\n @param [in] dimBlocks - dimension of a block\n @param [in] args - kernel arguments\n @param [in] sharedMemBytes - Amount of dynamic shared memory to allocate for this kernel. The\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream - Stream where the kernel should be dispatched. May be 0, in which case th\n default stream is used with associated synchronization rules.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipLaunchKernel( - function_address: *const ::std::os::raw::c_void, - numBlocks: dim3, - dimBlocks: dim3, - args: *mut *mut ::std::os::raw::c_void, - sharedMemBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enqueues a host function call in a stream.\n\n @param [in] stream - stream to enqueue work to.\n @param [in] fn - function to call once operations enqueued preceeding are complete.\n @param [in] userData - User-specified data to be passed to the function.\n @returns #hipSuccess, #hipErrorInvalidResourceHandle, #hipErrorInvalidValue,\n #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipLaunchHostFunc( - stream: hipStream_t, - fn_: hipHostFn_t, - userData: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " Copies memory for 2D arrays.\n\n @param pCopy - Parameters for the memory copy\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipDrvMemcpy2DUnaligned(pCopy: *const hip_Memcpy2D) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Launches kernel from the pointer address, with arguments and shared memory on stream.\n\n @param [in] function_address pointer to the Kernel to launch.\n @param [in] numBlocks number of blocks.\n @param [in] dimBlocks dimension of a block.\n @param [in] args pointer to kernel arguments.\n @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel.\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream Stream where the kernel should be dispatched.\n May be 0, in which case the default stream is used with 
associated synchronization rules.\n @param [in] startEvent If non-null, specified event will be updated to track the start time of\n the kernel launch. The event must be created before calling this API.\n @param [in] stopEvent If non-null, specified event will be updated to track the stop time of\n the kernel launch. The event must be created before calling this API.\n @param [in] flags The value of hipExtAnyOrderLaunch, signifies if kernel can be\n launched in any order.\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue.\n"] - pub fn hipExtLaunchKernel( - function_address: *const ::std::os::raw::c_void, - numBlocks: dim3, - dimBlocks: dim3, - args: *mut *mut ::std::os::raw::c_void, - sharedMemBytes: usize, - stream: hipStream_t, - startEvent: hipEvent_t, - stopEvent: hipEvent_t, - flags: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a texture object.\n\n @param [out] pTexObject pointer to the texture object to create\n @param [in] pResDesc pointer to resource descriptor\n @param [in] pTexDesc pointer to texture descriptor\n @param [in] pResViewDesc pointer to resource view descriptor\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported, #hipErrorOutOfMemory\n\n @note 3D liner filter isn't supported on GFX90A boards, on which the API @p hipCreateTextureObject will\n return hipErrorNotSupported.\n"] - pub fn hipCreateTextureObject( - pTexObject: *mut hipTextureObject_t, - pResDesc: *const hipResourceDesc, - pTexDesc: *const hipTextureDesc, - pResViewDesc: *const hipResourceViewDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys a texture object.\n\n @param [in] textureObject texture object to destroy\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipDestroyTextureObject(textureObject: hipTextureObject_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the channel descriptor in an array.\n\n @param [in] desc pointer to channel format descriptor\n @param [out] array memory array on the device\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetChannelDesc( - desc: *mut hipChannelFormatDesc, - array: hipArray_const_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets resource descriptor for the texture object.\n\n @param [out] pResDesc pointer to resource descriptor\n @param [in] textureObject texture object\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetTextureObjectResourceDesc( - pResDesc: *mut hipResourceDesc, - textureObject: hipTextureObject_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets resource view descriptor for the texture object.\n\n @param [out] pResViewDesc pointer to resource view descriptor\n @param [in] textureObject texture object\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetTextureObjectResourceViewDesc( - pResViewDesc: *mut hipResourceViewDesc, - textureObject: hipTextureObject_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets texture descriptor for the texture object.\n\n @param [out] pTexDesc pointer to texture descriptor\n @param [in] textureObject texture object\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetTextureObjectTextureDesc( - pTexDesc: *mut hipTextureDesc, - textureObject: hipTextureObject_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a texture object.\n\n @param [out] pTexObject pointer to texture object to 
create\n @param [in] pResDesc pointer to resource descriptor\n @param [in] pTexDesc pointer to texture descriptor\n @param [in] pResViewDesc pointer to resource view descriptor\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipTexObjectCreate( - pTexObject: *mut hipTextureObject_t, - pResDesc: *const HIP_RESOURCE_DESC, - pTexDesc: *const HIP_TEXTURE_DESC, - pResViewDesc: *const HIP_RESOURCE_VIEW_DESC, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys a texture object.\n\n @param [in] texObject texture object to destroy\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipTexObjectDestroy(texObject: hipTextureObject_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets resource descriptor of a texture object.\n\n @param [out] pResDesc pointer to resource descriptor\n @param [in] texObject texture object\n\n @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue\n"] - pub fn hipTexObjectGetResourceDesc( - pResDesc: *mut HIP_RESOURCE_DESC, - texObject: hipTextureObject_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets resource view descriptor of a texture object.\n\n @param [out] pResViewDesc pointer to resource view descriptor\n @param [in] texObject texture object\n\n @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue\n"] - pub fn hipTexObjectGetResourceViewDesc( - pResViewDesc: *mut HIP_RESOURCE_VIEW_DESC, - texObject: hipTextureObject_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets texture descriptor of a texture object.\n\n @param [out] pTexDesc pointer to texture descriptor\n @param [in] texObject texture object\n\n @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue\n"] - pub fn hipTexObjectGetTextureDesc( - pTexDesc: *mut HIP_TEXTURE_DESC, - texObject: hipTextureObject_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Allocate a mipmapped array on the device.\n\n @param[out] mipmappedArray - Pointer to allocated mipmapped array in device memory\n @param[in] desc - Requested channel format\n @param[in] extent - Requested allocation size (width field in elements)\n @param[in] numLevels - Number of mipmap levels to allocate\n @param[in] flags - Flags for extensions\n\n @return #hipSuccess, #hipErrorInvalidValue, #hipErrorMemoryAllocation\n\n @note This API is implemented on Windows, under development on Linux.\n"] - pub fn hipMallocMipmappedArray( - mipmappedArray: *mut hipMipmappedArray_t, - desc: *const hipChannelFormatDesc, - extent: hipExtent, - numLevels: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Frees a mipmapped array on the device.\n\n @param[in] mipmappedArray - Pointer to mipmapped array to free\n\n @return #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Windows, under development on Linux.\n"] - pub fn hipFreeMipmappedArray(mipmappedArray: hipMipmappedArray_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a mipmap level of a HIP mipmapped array.\n\n @param[out] levelArray - Returned mipmap level HIP array\n @param[in] mipmappedArray - HIP mipmapped array\n @param[in] level - Mipmap level\n\n @return #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Windows, under development on Linux.\n"] - pub fn hipGetMipmappedArrayLevel( - levelArray: *mut hipArray_t, - mipmappedArray: hipMipmappedArray_const_t, - level: ::std::os::raw::c_uint, - ) 
-> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create a mipmapped array.\n\n @param [out] pHandle pointer to mipmapped array\n @param [in] pMipmappedArrayDesc mipmapped array descriptor\n @param [in] numMipmapLevels mipmap level\n\n @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue\n\n @note This API is implemented on Windows, under development on Linux."] - pub fn hipMipmappedArrayCreate( - pHandle: *mut hipMipmappedArray_t, - pMipmappedArrayDesc: *mut HIP_ARRAY3D_DESCRIPTOR, - numMipmapLevels: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroy a mipmapped array.\n\n @param [out] hMipmappedArray pointer to mipmapped array to destroy\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Windows, under development on Linux.\n"] - pub fn hipMipmappedArrayDestroy(hMipmappedArray: hipMipmappedArray_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get a mipmapped array on a mipmapped level.\n\n @param [in] pLevelArray Pointer of array\n @param [out] hMipMappedArray Pointer of mipmapped array on the requested mipmap level\n @param [out] level Mipmap level\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note This API is implemented on Windows, under development on Linux.\n"] - pub fn hipMipmappedArrayGetLevel( - pLevelArray: *mut hipArray_t, - hMipMappedArray: hipMipmappedArray_t, - level: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Binds a mipmapped array to a texture.\n\n @param [in] tex pointer to the texture reference to bind\n @param [in] mipmappedArray memory mipmapped array on the device\n @param [in] desc opointer to the channel format\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipBindTextureToMipmappedArray( - tex: *const textureReference, - mipmappedArray: hipMipmappedArray_const_t, - desc: *const hipChannelFormatDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the texture reference related with the symbol.\n\n @param [out] texref texture reference\n @param [in] symbol pointer to the symbol related with the texture for the reference\n\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning This API is deprecated.\n"] - pub fn hipGetTextureReference( - texref: *mut *const textureReference, - symbol: *const ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the border color used by a texture reference.\n\n @param [out] pBorderColor Returned Type and Value of RGBA color.\n @param [in] texRef Texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetBorderColor( - pBorderColor: *mut f32, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the array bound to a texture reference.\n\n\n @param [in] pArray Returned array.\n @param [in] texRef texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetArray( - pArray: *mut hipArray_t, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets address mode for a texture reference.\n\n @param [in] texRef texture reference.\n @param [in] dim Dimension of the texture.\n @param [in] am Value of the texture address mode.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning This API is deprecated.\n"] - pub fn 
hipTexRefSetAddressMode( - texRef: *mut textureReference, - dim: ::std::os::raw::c_int, - am: hipTextureAddressMode, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Binds an array as a texture reference.\n\n @param [in] tex Pointer texture reference.\n @param [in] array Array to bind.\n @param [in] flags Flags should be set as HIP_TRSA_OVERRIDE_FORMAT, as a valid value.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetArray( - tex: *mut textureReference, - array: hipArray_const_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set filter mode for a texture reference.\n\n @param [in] texRef Pointer texture reference.\n @param [in] fm Value of texture filter mode.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetFilterMode( - texRef: *mut textureReference, - fm: hipTextureFilterMode, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set flags for a texture reference.\n\n @param [in] texRef Pointer texture reference.\n @param [in] Flags Value of flags.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetFlags( - texRef: *mut textureReference, - Flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set format for a texture reference.\n\n @param [in] texRef Pointer texture reference.\n @param [in] fmt Value of format.\n @param [in] NumPackedComponents Number of components per array.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetFormat( - texRef: *mut textureReference, - fmt: hipArray_Format, - NumPackedComponents: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Binds a memory area to a texture.\n\n @param [in] offset Offset in bytes.\n @param [in] tex Texture to bind.\n @param [in] devPtr Pointer of memory on the device.\n @param [in] desc Pointer of channel format descriptor.\n @param [in] size Size of memory in bites.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipBindTexture( - offset: *mut usize, - tex: *const textureReference, - devPtr: *const ::std::os::raw::c_void, - desc: *const hipChannelFormatDesc, - size: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Binds a 2D memory area to a texture.\n\n @param [in] offset Offset in bytes.\n @param [in] tex Texture to bind.\n @param [in] devPtr Pointer of 2D memory area on the device.\n @param [in] desc Pointer of channel format descriptor.\n @param [in] width Width in texel units.\n @param [in] height Height in texel units.\n @param [in] pitch Pitch in bytes.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipBindTexture2D( - offset: *mut usize, - tex: *const textureReference, - devPtr: *const ::std::os::raw::c_void, - desc: *const hipChannelFormatDesc, - width: usize, - height: usize, - pitch: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Binds a memory area to a texture.\n\n @param [in] tex Pointer of texture reference.\n @param [in] array Array to bind.\n @param [in] desc Pointer of channel format descriptor.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is 
deprecated.\n"] - pub fn hipBindTextureToArray( - tex: *const textureReference, - array: hipArray_const_t, - desc: *const hipChannelFormatDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the offset of the alignment in a texture.\n\n @param [in] offset Offset in bytes.\n @param [in] texref Pointer of texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipGetTextureAlignmentOffset( - offset: *mut usize, - texref: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Unbinds a texture.\n\n @param [in] tex Texture to unbind.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipUnbindTexture(tex: *const textureReference) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the the address for a texture reference.\n\n @param [out] dev_ptr Pointer of device address.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetAddress( - dev_ptr: *mut hipDeviceptr_t, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the address mode for a texture reference.\n\n @param [out] pam Pointer of address mode.\n @param [in] texRef Pointer of texture reference.\n @param [in] dim Dimension.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetAddressMode( - pam: *mut hipTextureAddressMode, - texRef: *const textureReference, - dim: ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets filter mode for a texture reference.\n\n @param [out] pfm Pointer of filter mode.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetFilterMode( - pfm: *mut hipTextureFilterMode, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets flags for a texture reference.\n\n @param [out] pFlags Pointer of flags.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetFlags( - pFlags: *mut ::std::os::raw::c_uint, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets texture format for a texture reference.\n\n @param [out] pFormat Pointer of the format.\n @param [out] pNumChannels Pointer of number of channels.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetFormat( - pFormat: *mut hipArray_Format, - pNumChannels: *mut ::std::os::raw::c_int, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the maximum anisotropy for a texture reference.\n\n @param [out] pmaxAnsio Pointer of the maximum anisotropy.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetMaxAnisotropy( - pmaxAnsio: *mut ::std::os::raw::c_int, - texRef: *const 
textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the mipmap filter mode for a texture reference.\n\n @param [out] pfm Pointer of the mipmap filter mode.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetMipmapFilterMode( - pfm: *mut hipTextureFilterMode, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the mipmap level bias for a texture reference.\n\n @param [out] pbias Pointer of the mipmap level bias.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetMipmapLevelBias( - pbias: *mut f32, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the minimum and maximum mipmap level clamps for a texture reference.\n\n @param [out] pminMipmapLevelClamp Pointer of the minimum mipmap level clamp.\n @param [out] pmaxMipmapLevelClamp Pointer of the maximum mipmap level clamp.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetMipmapLevelClamp( - pminMipmapLevelClamp: *mut f32, - pmaxMipmapLevelClamp: *mut f32, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets the mipmapped array bound to a texture reference.\n\n @param [out] pArray Pointer of the mipmapped array.\n @param [in] texRef Pointer of texture reference.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefGetMipMappedArray( - pArray: *mut hipMipmappedArray_t, - texRef: *const textureReference, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets an bound address for a texture reference.\n\n @param [out] ByteOffset Pointer of the offset in bytes.\n @param [in] texRef Pointer of texture reference.\n @param [in] dptr Pointer of device address to bind.\n @param [in] bytes Size in bytes.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetAddress( - ByteOffset: *mut usize, - texRef: *mut textureReference, - dptr: hipDeviceptr_t, - bytes: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set a bind an address as a 2D texture reference.\n\n @param [in] texRef Pointer of texture reference.\n @param [in] desc Pointer of array descriptor.\n @param [in] dptr Pointer of device address to bind.\n @param [in] Pitch Pitch in bytes.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetAddress2D( - texRef: *mut textureReference, - desc: *const HIP_ARRAY_DESCRIPTOR, - dptr: hipDeviceptr_t, - Pitch: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the maximum anisotropy for a texture reference.\n\n @param [in] texRef Pointer of texture reference.\n @param [out] maxAniso Value of the maximum anisotropy.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetMaxAnisotropy( - texRef: *mut textureReference, - maxAniso: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets border color 
for a texture reference.\n\n @param [in] texRef Pointer of texture reference.\n @param [in] pBorderColor Pointer of border color.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetBorderColor( - texRef: *mut textureReference, - pBorderColor: *mut f32, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets mipmap filter mode for a texture reference.\n\n @param [in] texRef Pointer of texture reference.\n @param [in] fm Value of filter mode.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetMipmapFilterMode( - texRef: *mut textureReference, - fm: hipTextureFilterMode, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets mipmap level bias for a texture reference.\n\n @param [in] texRef Pointer of texture reference.\n @param [in] bias Value of mipmap bias.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetMipmapLevelBias(texRef: *mut textureReference, bias: f32) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets mipmap level clamp for a texture reference.\n\n @param [in] texRef Pointer of texture reference.\n @param [in] minMipMapLevelClamp Value of minimum mipmap level clamp.\n @param [in] maxMipMapLevelClamp Value of maximum mipmap level clamp.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetMipmapLevelClamp( - texRef: *mut textureReference, - minMipMapLevelClamp: f32, - maxMipMapLevelClamp: f32, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Binds mipmapped array to a texture reference.\n\n @param [in] texRef Pointer of texture reference to bind.\n @param [in] mipmappedArray Pointer of mipmapped array to bind.\n @param [in] Flags Flags should be set as HIP_TRSA_OVERRIDE_FORMAT, as a valid value.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning This API is deprecated.\n"] - pub fn hipTexRefSetMipmappedArray( - texRef: *mut textureReference, - mipmappedArray: *mut hipMipmappedArray, - Flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[doc = " @defgroup Callback Callback Activity APIs\n @{\n This section describes the callback/Activity of HIP runtime API.\n/\n/**\n @brief Returns HIP API name by ID.\n\n @param [in] id ID of HIP API\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipApiName(id: u32) -> *const ::std::os::raw::c_char; -} -extern "C" { - #[doc = " @brief Returns kernel name reference by function name.\n\n @param [in] f Name of function\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipKernelNameRef(f: hipFunction_t) -> *const ::std::os::raw::c_char; -} -extern "C" { - #[doc = " @brief Retrives kernel for a given host pointer, unless stated otherwise.\n\n @param [in] hostFunction Pointer of host function.\n @param [in] stream Stream the kernel is executed on.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipKernelNameRefByPtr( - hostFunction: *const ::std::os::raw::c_void, - stream: hipStream_t, - ) -> *const ::std::os::raw::c_char; -} -extern "C" { - #[doc = " @brief Returns device ID on the stream.\n\n @param [in] stream Stream of device executed on.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGetStreamDeviceId(stream: hipStream_t) -> ::std::os::raw::c_int; -} -extern "C" { - 
#[must_use] - #[doc = " @brief Begins graph capture on a stream.\n\n @param [in] stream - Stream to initiate capture.\n @param [in] mode - Controls the interaction of this capture sequence with other API calls that\n are not safe.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipStreamBeginCapture(stream: hipStream_t, mode: hipStreamCaptureMode) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Ends capture on a stream, returning the captured graph.\n\n @param [in] stream - Stream to end capture.\n @param [out] pGraph - returns the graph captured.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipStreamEndCapture(stream: hipStream_t, pGraph: *mut hipGraph_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get capture status of a stream.\n\n @param [in] stream - Stream under capture.\n @param [out] pCaptureStatus - returns current status of the capture.\n @param [out] pId - unique ID of the capture.\n\n @returns #hipSuccess, #hipErrorStreamCaptureImplicit\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipStreamGetCaptureInfo( - stream: hipStream_t, - pCaptureStatus: *mut hipStreamCaptureStatus, - pId: *mut ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get stream's capture state\n\n @param [in] stream - Stream under capture.\n @param [out] captureStatus_out - returns current status of the capture.\n @param [out] id_out - unique ID of the capture.\n @param [in] graph_out - returns the graph being captured into.\n @param [out] dependencies_out - returns pointer to an array of nodes.\n @param [out] numDependencies_out - returns size of the array returned in dependencies_out.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorStreamCaptureImplicit\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipStreamGetCaptureInfo_v2( - stream: hipStream_t, - captureStatus_out: *mut hipStreamCaptureStatus, - id_out: *mut ::std::os::raw::c_ulonglong, - graph_out: *mut hipGraph_t, - dependencies_out: *mut *const hipGraphNode_t, - numDependencies_out: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get stream's capture state\n\n @param [in] stream - Stream under capture.\n @param [out] pCaptureStatus - returns current status of the capture.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorStreamCaptureImplicit\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipStreamIsCapturing( - stream: hipStream_t, - pCaptureStatus: *mut hipStreamCaptureStatus, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Update the set of dependencies in a capturing stream\n\n @param [in] stream Stream under capture.\n @param [in] dependencies pointer to an array of nodes to Add/Replace.\n @param [in] numDependencies size of the array in dependencies.\n @param [in] flags Flag how to update dependency set. 
Should be one of value in enum\n #hipStreamUpdateCaptureDependenciesFlags\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorIllegalState\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipStreamUpdateCaptureDependencies( - stream: hipStream_t, - dependencies: *mut hipGraphNode_t, - numDependencies: usize, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Swaps the stream capture mode of a thread.\n\n @param [in] mode - Pointer to mode value to swap with the current mode\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipThreadExchangeStreamCaptureMode(mode: *mut hipStreamCaptureMode) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a graph\n\n @param [out] pGraph - pointer to graph to create.\n @param [in] flags - flags for graph creation, must be 0.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorMemoryAllocation\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphCreate(pGraph: *mut hipGraph_t, flags: ::std::os::raw::c_uint) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys a graph\n\n @param [in] graph - instance of graph to destroy.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphDestroy(graph: hipGraph_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Adds dependency edges to a graph.\n\n @param [in] graph - instance of the graph to add dependencies.\n @param [in] from - pointer to the graph nodes with dependenties to add from.\n @param [in] to - pointer to the graph nodes to add dependenties to.\n @param [in] numDependencies - the number of dependencies to add.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphAddDependencies( - graph: hipGraph_t, - from: *const hipGraphNode_t, - to: *const hipGraphNode_t, - numDependencies: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Removes dependency edges from a graph.\n\n @param [in] graph - instance of the graph to remove dependencies.\n @param [in] from - Array of nodes that provide the dependencies.\n @param [in] to - Array of dependent nodes.\n @param [in] numDependencies - the number of dependencies to remove.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphRemoveDependencies( - graph: hipGraph_t, - from: *const hipGraphNode_t, - to: *const hipGraphNode_t, - numDependencies: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a graph's dependency edges.\n\n @param [in] graph - instance of the graph to get the edges from.\n @param [out] from - pointer to the graph nodes to return edge endpoints.\n @param [out] to - pointer to the graph nodes to return edge endpoints.\n @param 
[out] numEdges - returns number of edges.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n from and to may both be NULL, in which case this function only returns the number of edges in\n numEdges. Otherwise, numEdges entries will be filled in. If numEdges is higher than the actual\n number of edges, the remaining entries in from and to will be set to NULL, and the number of\n edges actually returned will be written to numEdges\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphGetEdges( - graph: hipGraph_t, - from: *mut hipGraphNode_t, - to: *mut hipGraphNode_t, - numEdges: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns graph nodes.\n\n @param [in] graph - instance of graph to get the nodes.\n @param [out] nodes - pointer to return the graph nodes.\n @param [out] numNodes - returns number of graph nodes.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n nodes may be NULL, in which case this function will return the number of nodes in numNodes.\n Otherwise, numNodes entries will be filled in. If numNodes is higher than the actual number of\n nodes, the remaining entries in nodes will be set to NULL, and the number of nodes actually\n obtained will be returned in numNodes.\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphGetNodes( - graph: hipGraph_t, - nodes: *mut hipGraphNode_t, - numNodes: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns graph's root nodes.\n\n @param [in] graph - instance of the graph to get the nodes.\n @param [out] pRootNodes - pointer to return the graph's root nodes.\n @param [out] pNumRootNodes - returns the number of graph's root nodes.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n pRootNodes may be NULL, in which case this function will return the number of root nodes in\n pNumRootNodes. Otherwise, pNumRootNodes entries will be filled in. If pNumRootNodes is higher\n than the actual number of root nodes, the remaining entries in pRootNodes will be set to NULL,\n and the number of nodes actually obtained will be returned in pNumRootNodes.\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphGetRootNodes( - graph: hipGraph_t, - pRootNodes: *mut hipGraphNode_t, - pNumRootNodes: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a node's dependencies.\n\n @param [in] node - graph node to get the dependencies from.\n @param [out] pDependencies - pointer to to return the dependencies.\n @param [out] pNumDependencies - returns the number of graph node dependencies.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n pDependencies may be NULL, in which case this function will return the number of dependencies in\n pNumDependencies. Otherwise, pNumDependencies entries will be filled in. 
If pNumDependencies is\n higher than the actual number of dependencies, the remaining entries in pDependencies will be set\n to NULL, and the number of nodes actually obtained will be returned in pNumDependencies.\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphNodeGetDependencies( - node: hipGraphNode_t, - pDependencies: *mut hipGraphNode_t, - pNumDependencies: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a node's dependent nodes.\n\n @param [in] node - graph node to get the Dependent nodes from.\n @param [out] pDependentNodes - pointer to return the graph dependent nodes.\n @param [out] pNumDependentNodes - returns the number of graph node dependent nodes.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n DependentNodes may be NULL, in which case this function will return the number of dependent nodes\n in pNumDependentNodes. Otherwise, pNumDependentNodes entries will be filled in. If\n pNumDependentNodes is higher than the actual number of dependent nodes, the remaining entries in\n pDependentNodes will be set to NULL, and the number of nodes actually obtained will be returned\n in pNumDependentNodes.\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphNodeGetDependentNodes( - node: hipGraphNode_t, - pDependentNodes: *mut hipGraphNode_t, - pNumDependentNodes: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a node's type.\n\n @param [in] node - instance of the graph to add dependencies.\n @param [out] pType - pointer to the return the type\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphNodeGetType(node: hipGraphNode_t, pType: *mut hipGraphNodeType) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Remove a node from the graph.\n\n @param [in] node - graph node to remove\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphDestroyNode(node: hipGraphNode_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Clones a graph.\n\n @param [out] pGraphClone - Returns newly created cloned graph.\n @param [in] originalGraph - original graph to clone from.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorMemoryAllocation\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphClone(pGraphClone: *mut hipGraph_t, originalGraph: hipGraph_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Finds a cloned version of a node.\n\n @param [out] pNode - Returns the cloned node.\n @param [in] originalNode - original node handle.\n @param [in] clonedGraph - Cloned graph to query.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphNodeFindInClone( - pNode: *mut hipGraphNode_t, - originalNode: hipGraphNode_t, - clonedGraph: hipGraph_t, - ) -> hipError_t; -} 
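// --- Illustrative sketch, not part of the diff above or below: one plausible way the
// stream-capture and graph entry points declared in these removed hip_runtime-sys bindings
// could be driven from Rust. It assumes the surrounding generated items are in scope
// (hipError_t, hipStream_t, hipGraph_t, hipGraphExec_t, hipGraphNode_t, hipStreamCaptureMode,
// and the extern "C" fns hipStreamBeginCapture/EndCapture, hipGraphInstantiate,
// hipGraphLaunch, hipGraphDestroy). The constants hipError_t::hipSuccess and
// hipStreamCaptureMode::hipStreamCaptureModeGlobal follow bindgen's newtype-enum convention
// and are assumptions here, as is the helper name `capture_and_launch`.
unsafe fn capture_and_launch(stream: hipStream_t) -> Result<hipGraphExec_t, hipError_t> {
    // Turn each #[must_use] hipError_t into a Result so failures short-circuit with `?`.
    let check = |e: hipError_t| if e == hipError_t::hipSuccess { Ok(()) } else { Err(e) };

    // Record work submitted to `stream` into an implicit graph instead of executing it.
    check(hipStreamBeginCapture(
        stream,
        hipStreamCaptureMode::hipStreamCaptureModeGlobal,
    ))?;
    // ... enqueue kernels / memcpys on `stream` here; each call becomes a graph node ...
    let mut graph: hipGraph_t = std::ptr::null_mut();
    check(hipStreamEndCapture(stream, &mut graph))?;

    // Instantiate the captured graph into an executable graph, keeping the build log around.
    let mut exec: hipGraphExec_t = std::ptr::null_mut();
    let mut error_node: hipGraphNode_t = std::ptr::null_mut();
    let mut log = [0 as ::std::os::raw::c_char; 256];
    check(hipGraphInstantiate(
        &mut exec,
        graph,
        &mut error_node,
        log.as_mut_ptr(),
        log.len(),
    ))?;

    // Launch once and drop the builder graph; the caller owns `exec` and would eventually
    // release it with hipGraphExecDestroy.
    check(hipGraphLaunch(exec, stream))?;
    check(hipGraphDestroy(graph))?;
    Ok(exec)
}
// --- end of illustrative sketch ---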
-extern "C" { - #[must_use] - #[doc = " @brief Creates an executable graph from a graph\n\n @param [out] pGraphExec - pointer to instantiated executable graph that is created.\n @param [in] graph - instance of graph to instantiate.\n @param [out] pErrorNode - pointer to error node in case error occured in graph instantiation,\n it could modify the correponding node.\n @param [out] pLogBuffer - pointer to log buffer.\n @param [out] bufferSize - the size of log buffer.\n\n @returns #hipSuccess, #hipErrorOutOfMemory\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n"] - pub fn hipGraphInstantiate( - pGraphExec: *mut hipGraphExec_t, - graph: hipGraph_t, - pErrorNode: *mut hipGraphNode_t, - pLogBuffer: *mut ::std::os::raw::c_char, - bufferSize: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates an executable graph from a graph.\n\n @param [out] pGraphExec - pointer to instantiated executable graph that is created.\n @param [in] graph - instance of graph to instantiate.\n @param [in] flags - Flags to control instantiation.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.It does not support\n any of flag and is behaving as hipGraphInstantiate."] - pub fn hipGraphInstantiateWithFlags( - pGraphExec: *mut hipGraphExec_t, - graph: hipGraph_t, - flags: ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief launches an executable graph in a stream\n\n @param [in] graphExec - instance of executable graph to launch.\n @param [in] stream - instance of stream in which to launch executable graph.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphLaunch(graphExec: hipGraphExec_t, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief uploads an executable graph in a stream\n\n @param [in] graphExec - instance of executable graph to launch.\n @param [in] stream - instance of stream in which to launch executable graph.\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphUpload(graphExec: hipGraphExec_t, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroys an executable graph\n\n @param [in] graphExec - instance of executable graph to destry.\n\n @returns #hipSuccess.\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecDestroy(graphExec: hipGraphExec_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Check whether an executable graph can be updated with a graph and perform the update if *\n possible.\n\n @param [in] hGraphExec - instance of executable graph to update.\n @param [in] hGraph - graph that contains the updated parameters.\n @param [in] hErrorNode_out - node which caused the permissibility check to forbid the update.\n @param [in] updateResult_out - Whether the graph update was permitted.\n @returns #hipSuccess, #hipErrorGraphExecUpdateFailure\n\n @warning : This API 
is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecUpdate( - hGraphExec: hipGraphExec_t, - hGraph: hipGraph_t, - hErrorNode_out: *mut hipGraphNode_t, - updateResult_out: *mut hipGraphExecUpdateResult, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a kernel execution node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to graph node to create.\n @param [in] graph - instance of graph to add the created node.\n @param [in] pDependencies - pointer to the dependencies on the kernel execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] pNodeParams - pointer to the parameters to the kernel execution node on the GPU.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDeviceFunction\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddKernelNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - pNodeParams: *const hipKernelNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets kernel node's parameters.\n\n @param [in] node - instance of the node to get parameters from.\n @param [out] pNodeParams - pointer to the parameters\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphKernelNodeGetParams( - node: hipGraphNode_t, - pNodeParams: *mut hipKernelNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a kernel node's parameters.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - const pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphKernelNodeSetParams( - node: hipGraphNode_t, - pNodeParams: *const hipKernelNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a kernel node in the given graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - const pointer to the kernel node parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecKernelNodeSetParams( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - pNodeParams: *const hipKernelNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memcpy node and adds it to a graph.\n\n @param [out] phGraphNode - pointer to graph node to create.\n @param [in] hGraph - instance of graph to add the created node.\n @param [in] dependencies - const pointer to the dependencies on the memcpy execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] copyParams - const pointer to the parameters for the memory copy.\n @param [in] ctx - cotext related to current device.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked 
as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDrvGraphAddMemcpyNode( - phGraphNode: *mut hipGraphNode_t, - hGraph: hipGraph_t, - dependencies: *const hipGraphNode_t, - numDependencies: usize, - copyParams: *const HIP_MEMCPY3D, - ctx: hipCtx_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memcpy node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to graph node to create.\n @param [in] graph - instance of graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] pCopyParams - const pointer to the parameters for the memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemcpyNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - pCopyParams: *const hipMemcpy3DParms, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a memcpy node's parameters.\n\n @param [in] node - instance of the node to get parameters from.\n @param [out] pNodeParams - pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemcpyNodeGetParams( - node: hipGraphNode_t, - pNodeParams: *mut hipMemcpy3DParms, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a memcpy node's parameters.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - const pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemcpyNodeSetParams( - node: hipGraphNode_t, - pNodeParams: *const hipMemcpy3DParms, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a node attribute.\n\n @param [in] hNode - instance of the node to set parameters to.\n @param [in] attr - the attribute node is set to.\n @param [in] value - const pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphKernelNodeSetAttribute( - hNode: hipGraphNode_t, - attr: hipKernelNodeAttrID, - value: *const hipKernelNodeAttrValue, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a node attribute.\n\n @param [in] hNode - instance of the node to set parameters to.\n @param [in] attr - the attribute node is set to.\n @param [in] value - const pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphKernelNodeGetAttribute( - hNode: hipGraphNode_t, - attr: hipKernelNodeAttrID, - value: *mut hipKernelNodeAttrValue, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a memcpy node in the given 
graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - const pointer to the kernel node parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecMemcpyNodeSetParams( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - pNodeParams: *mut hipMemcpy3DParms, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a 1D memcpy node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to graph node to create.\n @param [in] graph - instance of graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] dst - pointer to memory address to the destination.\n @param [in] src - pointer to memory address to the source.\n @param [in] count - the size of the memory to copy.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemcpyNode1D( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - count: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a memcpy node's parameters to perform a 1-dimensional copy.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] dst - pointer to memory address to the destination.\n @param [in] src - pointer to memory address to the source.\n @param [in] count - the size of the memory to copy.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemcpyNodeSetParams1D( - node: hipGraphNode_t, - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - count: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a memcpy node in the given graphExec to perform a 1-dimensional\n copy.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - instance of the node to set parameters to.\n @param [in] dst - pointer to memory address to the destination.\n @param [in] src - pointer to memory address to the source.\n @param [in] count - the size of the memory to copy.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecMemcpyNodeSetParams1D( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - count: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph.\n\n @param [out] 
pGraphNode - pointer to graph node to create.\n @param [in] graph - instance of graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] dst - pointer to memory address to the destination.\n @param [in] symbol - Device symbol address.\n @param [in] count - the size of the memory to copy.\n @param [in] offset - Offset from start of symbol in bytes.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemcpyNodeFromSymbol( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - count: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a memcpy node's parameters to copy from a symbol on the device.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] dst - pointer to memory address to the destination.\n @param [in] symbol - Device symbol address.\n @param [in] count - the size of the memory to copy.\n @param [in] offset - Offset from start of symbol in bytes.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemcpyNodeSetParamsFromSymbol( - node: hipGraphNode_t, - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - count: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the\n * device.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - instance of the node to set parameters to.\n @param [in] dst - pointer to memory address to the destination.\n @param [in] symbol - Device symbol address.\n @param [in] count - the size of the memory to copy.\n @param [in] offset - Offset from start of symbol in bytes.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecMemcpyNodeSetParamsFromSymbol( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - count: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph.\n\n @param [out] pGraphNode - pointer to graph node to create.\n @param [in] graph - instance of graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] symbol - Device symbol address.\n @param [in] src - pointer to memory address of the src.\n @param [in] count - the size of the memory to copy.\n @param [in] offset - Offset from 
start of symbol in bytes.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemcpyNodeToSymbol( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - count: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a memcpy node's parameters to copy to a symbol on the device.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] symbol - Device symbol address.\n @param [in] src - pointer to memory address of the src.\n @param [in] count - the size of the memory to copy.\n @param [in] offset - Offset from start of symbol in bytes.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemcpyNodeSetParamsToSymbol( - node: hipGraphNode_t, - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - count: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the\n device.\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - instance of the node to set parameters to.\n @param [in] symbol - Device symbol address.\n @param [in] src - pointer to memory address of the src.\n @param [in] count - the size of the memory to copy.\n @param [in] offset - Offset from start of symbol in bytes.\n @param [in] kind - the type of memory copy.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecMemcpyNodeSetParamsToSymbol( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - count: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memset node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create.\n @param [in] graph - instance of the graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memset execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] pMemsetParams - const pointer to the parameters for the memory set.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemsetNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - pMemsetParams: *const hipMemsetParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a memset node's parameters.\n\n @param [in] node - instane of the node to get parameters from.\n @param [out] pNodeParams - pointer to the parameters.\n @returns 
#hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemsetNodeGetParams( - node: hipGraphNode_t, - pNodeParams: *mut hipMemsetParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a memset node's parameters.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemsetNodeSetParams( - node: hipGraphNode_t, - pNodeParams: *const hipMemsetParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a memset node in the given graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecMemsetNodeSetParams( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - pNodeParams: *const hipMemsetParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a host execution node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create.\n @param [in] graph - instance of the graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memset execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] pNodeParams -pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddHostNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - pNodeParams: *const hipHostNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns a host node's parameters.\n\n @param [in] node - instane of the node to get parameters from.\n @param [out] pNodeParams - pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphHostNodeGetParams( - node: hipGraphNode_t, - pNodeParams: *mut hipHostNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets a host node's parameters.\n\n @param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphHostNodeSetParams( - node: hipGraphNode_t, - pNodeParams: *const hipHostNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the parameters for a host node in the given graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n 
@param [in] node - instance of the node to set parameters to.\n @param [in] pNodeParams - pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecHostNodeSetParams( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - pNodeParams: *const hipHostNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a child graph node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create.\n @param [in] graph - instance of the graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memset execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] childGraph - the graph to clone into this node\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddChildGraphNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - childGraph: hipGraph_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets a handle to the embedded graph of a child graph node.\n\n @param [in] node - instane of the node to get child graph.\n @param [out] pGraph - pointer to get the graph.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphChildGraphNodeGetGraph( - node: hipGraphNode_t, - pGraph: *mut hipGraph_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Updates node parameters in the child graph node in the given graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] node - node from the graph which was used to instantiate graphExec.\n @param [in] childGraph - child graph with updated parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecChildGraphNodeSetParams( - hGraphExec: hipGraphExec_t, - node: hipGraphNode_t, - childGraph: hipGraph_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates an empty node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create and add to the graph.\n @param [in] graph - instane of the graph the node is add to.\n @param [in] pDependencies - const pointer to the node dependenties.\n @param [in] numDependencies - the number of dependencies.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddEmptyNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates an event record node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create and add to the graph.\n @param [in] graph - instane of the graph the node to be added.\n @param [in] pDependencies - 
const pointer to the node dependenties.\n @param [in] numDependencies - the number of dependencies.\n @param [in] event - Event for the node.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddEventRecordNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - event: hipEvent_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the event associated with an event record node.\n\n @param [in] node - instane of the node to get event from.\n @param [out] event_out - Pointer to return the event.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphEventRecordNodeGetEvent( - node: hipGraphNode_t, - event_out: *mut hipEvent_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets an event record node's event.\n\n @param [in] node - instane of the node to set event to.\n @param [in] event - pointer to the event.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphEventRecordNodeSetEvent(node: hipGraphNode_t, event: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the event for an event record node in the given graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] hNode - node from the graph which was used to instantiate graphExec.\n @param [in] event - pointer to the event.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecEventRecordNodeSetEvent( - hGraphExec: hipGraphExec_t, - hNode: hipGraphNode_t, - event: hipEvent_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates an event wait node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create and add to the graph.\n @param [in] graph - instane of the graph the node to be added.\n @param [in] pDependencies - const pointer to the node dependenties.\n @param [in] numDependencies - the number of dependencies.\n @param [in] event - Event for the node.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddEventWaitNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - event: hipEvent_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the event associated with an event wait node.\n\n @param [in] node - instane of the node to get event from.\n @param [out] event_out - Pointer to return the event.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphEventWaitNodeGetEvent( - node: hipGraphNode_t, - event_out: *mut hipEvent_t, - ) -> hipError_t; -} -extern 
"C" { - #[must_use] - #[doc = " @brief Sets an event wait node's event.\n\n @param [in] node - instane of the node to set event to.\n @param [in] event - pointer to the event.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphEventWaitNodeSetEvent(node: hipGraphNode_t, event: hipEvent_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Sets the event for an event record node in the given graphExec.\n\n @param [in] hGraphExec - instance of the executable graph with the node.\n @param [in] hNode - node from the graph which was used to instantiate graphExec.\n @param [in] event - pointer to the event.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecEventWaitNodeSetEvent( - hGraphExec: hipGraphExec_t, - hNode: hipGraphNode_t, - event: hipEvent_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memory allocation node and adds it to a graph\n\n @param [out] pGraphNode - Pointer to the graph node to create and add to the graph\n @param [in] graph - Instane of the graph the node to be added\n @param [in] pDependencies - Const pointer to the node dependenties\n @param [in] numDependencies - The number of dependencies\n @param [in] pNodeParams - Node parameters for memory allocation\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemAllocNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - pNodeParams: *mut hipMemAllocNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns parameters for memory allocation node\n\n @param [in] node - Memory allocation node for a query\n @param [out] pNodeParams - Parameters for the specified memory allocation node\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemAllocNodeGetParams( - node: hipGraphNode_t, - pNodeParams: *mut hipMemAllocNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memory free node and adds it to a graph\n\n @param [out] pGraphNode - Pointer to the graph node to create and add to the graph\n @param [in] graph - Instane of the graph the node to be added\n @param [in] pDependencies - Const pointer to the node dependenties\n @param [in] numDependencies - The number of dependencies\n @param [in] dev_ptr - Pointer to the memory to be freed\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddMemFreeNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - dev_ptr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns parameters for memory free node\n\n @param [in] node - Memory free node for a query\n @param [out] dev_ptr - Device 
pointer for the specified memory free node\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphMemFreeNodeGetParams( - node: hipGraphNode_t, - dev_ptr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the mem attribute for graphs.\n\n @param [in] device - device the attr is get for.\n @param [in] attr - attr to get.\n @param [out] value - value for specific attr.\n @returns #hipSuccess, #hipErrorInvalidDevice\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDeviceGetGraphMemAttribute( - device: ::std::os::raw::c_int, - attr: hipGraphMemAttributeType, - value: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set the mem attribute for graphs.\n\n @param [in] device - device the attr is set for.\n @param [in] attr - attr to set.\n @param [in] value - value for specific attr.\n @returns #hipSuccess, #hipErrorInvalidDevice\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDeviceSetGraphMemAttribute( - device: ::std::os::raw::c_int, - attr: hipGraphMemAttributeType, - value: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Free unused memory on specific device used for graph back to OS.\n\n @param [in] device - device the memory is used for graphs\n @returns #hipSuccess, #hipErrorInvalidDevice\n\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDeviceGraphMemTrim(device: ::std::os::raw::c_int) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create an instance of userObject to manage lifetime of a resource.\n\n @param [out] object_out - pointer to instace of userobj.\n @param [in] ptr - pointer to pass to destroy function.\n @param [in] destroy - destroy callback to remove resource.\n @param [in] initialRefcount - reference to resource.\n @param [in] flags - flags passed to API.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipUserObjectCreate( - object_out: *mut hipUserObject_t, - ptr: *mut ::std::os::raw::c_void, - destroy: hipHostFn_t, - initialRefcount: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Release number of references to resource.\n\n @param [in] object - pointer to instace of userobj.\n @param [in] count - reference to resource to be retained.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipUserObjectRelease( - object: hipUserObject_t, - count: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Retain number of references to resource.\n\n @param [in] object - pointer to instace of userobj.\n @param [in] count - reference to resource to be retained.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API 
is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipUserObjectRetain( - object: hipUserObject_t, - count: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Retain user object for graphs.\n\n @param [in] graph - pointer to graph to retain the user object for.\n @param [in] object - pointer to instace of userobj.\n @param [in] count - reference to resource to be retained.\n @param [in] flags - flags passed to API.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphRetainUserObject( - graph: hipGraph_t, - object: hipUserObject_t, - count: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Release user object from graphs.\n\n @param [in] graph - pointer to graph to retain the user object for.\n @param [in] object - pointer to instace of userobj.\n @param [in] count - reference to resource to be retained.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphReleaseUserObject( - graph: hipGraph_t, - object: hipUserObject_t, - count: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Write a DOT file describing graph structure.\n\n @param [in] graph - graph object for which DOT file has to be generated.\n @param [in] path - path to write the DOT file.\n @param [in] flags - Flags from hipGraphDebugDotFlags to get additional node information.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOperatingSystem\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphDebugDotPrint( - graph: hipGraph_t, - path: *const ::std::os::raw::c_char, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Copies attributes from source node to destination node.\n\n Copies attributes from source node to destination node.\n Both node must have the same context.\n\n @param [out] hDst - Destination node.\n @param [in] hSrc - Source node.\n For list of attributes see ::hipKernelNodeAttrID.\n\n @returns #hipSuccess, #hipErrorInvalidContext\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphKernelNodeCopyAttributes( - hSrc: hipGraphNode_t, - hDst: hipGraphNode_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Enables or disables the specified node in the given graphExec\n\n Sets hNode to be either enabled or disabled. Disabled nodes are functionally equivalent\n to empty nodes until they are reenabled. 
Existing node parameters are not affected by\n disabling/enabling the node.\n\n The node is identified by the corresponding hNode in the non-executable graph, from which the\n executable graph was instantiated.\n\n hNode must not have been removed from the original graph.\n\n @note Currently only kernel, memset and memcpy nodes are supported.\n\n @param [in] hGraphExec - The executable graph in which to set the specified node.\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [in] isEnabled - Node is enabled if != 0, otherwise the node is disabled.\n\n @returns #hipSuccess, #hipErrorInvalidValue,\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphNodeSetEnabled( - hGraphExec: hipGraphExec_t, - hNode: hipGraphNode_t, - isEnabled: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Query whether a node in the given graphExec is enabled\n\n Sets isEnabled to 1 if hNode is enabled, or 0 if it is disabled.\n\n The node is identified by the corresponding node in the non-executable graph, from which the\n executable graph was instantiated.\n\n hNode must not have been removed from the original graph.\n\n @note Currently only kernel, memset and memcpy nodes are supported.\n\n @param [in] hGraphExec - The executable graph in which to set the specified node.\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [out] isEnabled - Location to return the enabled status of the node.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphNodeGetEnabled( - hGraphExec: hipGraphExec_t, - hNode: hipGraphNode_t, - isEnabled: *mut ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a external semaphor wait node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create.\n @param [in] graph - instance of the graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memset execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] nodeParams -pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphAddExternalSemaphoresWaitNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - nodeParams: *const hipExternalSemaphoreWaitNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a external semaphor signal node and adds it to a graph.\n\n @param [out] pGraphNode - pointer to the graph node to create.\n @param [in] graph - instance of the graph to add the created node.\n @param [in] pDependencies - const pointer to the dependencies on the memset execution node.\n @param [in] numDependencies - the number of the dependencies.\n @param [in] nodeParams -pointer to the parameters.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn 
hipGraphAddExternalSemaphoresSignalNode( - pGraphNode: *mut hipGraphNode_t, - graph: hipGraph_t, - pDependencies: *const hipGraphNode_t, - numDependencies: usize, - nodeParams: *const hipExternalSemaphoreSignalNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Updates node parameters in the external semaphore signal node.\n\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [in] nodeParams - Pointer to the params to be set.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExternalSemaphoresSignalNodeSetParams( - hNode: hipGraphNode_t, - nodeParams: *const hipExternalSemaphoreSignalNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Updates node parameters in the external semaphore wait node.\n\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [in] nodeParams - Pointer to the params to be set.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExternalSemaphoresWaitNodeSetParams( - hNode: hipGraphNode_t, - nodeParams: *const hipExternalSemaphoreWaitNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns external semaphore signal node params.\n\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [out] params_out - Pointer to params.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExternalSemaphoresSignalNodeGetParams( - hNode: hipGraphNode_t, - params_out: *mut hipExternalSemaphoreSignalNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns external semaphore wait node params.\n\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [out] params_out - Pointer to params.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExternalSemaphoresWaitNodeGetParams( - hNode: hipGraphNode_t, - params_out: *mut hipExternalSemaphoreWaitNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Updates node parameters in the external semaphore signal node in the given graphExec.\n\n @param [in] hGraphExec - The executable graph in which to set the specified node.\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [in] nodeParams - Pointer to the params to be set.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecExternalSemaphoresSignalNodeSetParams( - hGraphExec: hipGraphExec_t, - hNode: hipGraphNode_t, - nodeParams: *const hipExternalSemaphoreSignalNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Updates node parameters in the external semaphore wait node in the given graphExec.\n\n @param [in] hGraphExec - The executable graph in which to set the 
specified node.\n @param [in] hNode - Node from the graph from which graphExec was instantiated.\n @param [in] nodeParams - Pointer to the params to be set.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipGraphExecExternalSemaphoresWaitNodeSetParams( - hGraphExec: hipGraphExec_t, - hNode: hipGraphNode_t, - nodeParams: *const hipExternalSemaphoreWaitNodeParams, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memset node and adds it to a graph.\n\n @param [out] phGraphNode - pointer to graph node to create.\n @param [in] hGraph - instance of graph to add the created node to.\n @param [in] dependencies - const pointer to the dependencies on the memset execution node.\n @param [in] numDependencies - number of the dependencies.\n @param [in] memsetParams - const pointer to the parameters for the memory set.\n @param [in] ctx - cotext related to current device.\n @returns #hipSuccess, #hipErrorInvalidValue\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues."] - pub fn hipDrvGraphAddMemsetNode( - phGraphNode: *mut hipGraphNode_t, - hGraph: hipGraph_t, - dependencies: *const hipGraphNode_t, - numDependencies: usize, - memsetParams: *const HIP_MEMSET_NODE_PARAMS, - ctx: hipCtx_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Frees an address range reservation made via hipMemAddressReserve\n\n @param [in] devPtr - starting address of the range.\n @param [in] size - size of the range.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemAddressFree(devPtr: *mut ::std::os::raw::c_void, size: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Reserves an address range\n\n @param [out] ptr - starting address of the reserved range.\n @param [in] size - size of the reservation.\n @param [in] alignment - alignment of the address.\n @param [in] addr - requested starting address of the range.\n @param [in] flags - currently unused, must be zero.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemAddressReserve( - ptr: *mut *mut ::std::os::raw::c_void, - size: usize, - alignment: usize, - addr: *mut ::std::os::raw::c_void, - flags: ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Creates a memory allocation described by the properties and size\n\n @param [out] handle - value of the returned handle.\n @param [in] size - size of the allocation.\n @param [in] prop - properties of the allocation.\n @param [in] flags - currently unused, must be zero.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn 
hipMemCreate( - handle: *mut hipMemGenericAllocationHandle_t, - size: usize, - prop: *const hipMemAllocationProp, - flags: ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Exports an allocation to a requested shareable handle type.\n\n @param [out] shareableHandle - value of the returned handle.\n @param [in] handle - handle to share.\n @param [in] handleType - type of the shareable handle.\n @param [in] flags - currently unused, must be zero.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemExportToShareableHandle( - shareableHandle: *mut ::std::os::raw::c_void, - handle: hipMemGenericAllocationHandle_t, - handleType: hipMemAllocationHandleType, - flags: ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get the access flags set for the given location and ptr.\n\n @param [out] flags - flags for this location.\n @param [in] location - target location.\n @param [in] ptr - address to check the access flags.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemGetAccess( - flags: *mut ::std::os::raw::c_ulonglong, - location: *const hipMemLocation, - ptr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Calculates either the minimal or recommended granularity.\n\n @param [out] granularity - returned granularity.\n @param [in] prop - location properties.\n @param [in] option - determines which granularity to return.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows.\n"] - pub fn hipMemGetAllocationGranularity( - granularity: *mut usize, - prop: *const hipMemAllocationProp, - option: hipMemAllocationGranularity_flags, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Retrieve the property structure of the given handle.\n\n @param [out] prop - properties of the given handle.\n @param [in] handle - handle to perform the query on.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux under development on Windows."] - pub fn hipMemGetAllocationPropertiesFromHandle( - prop: *mut hipMemAllocationProp, - handle: hipMemGenericAllocationHandle_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Imports an allocation from a requested shareable handle type.\n\n @param [out] handle - returned value.\n @param [in] osHandle - shareable handle representing the memory allocation.\n @param [in] shHandleType - handle type.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes 
and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemImportFromShareableHandle( - handle: *mut hipMemGenericAllocationHandle_t, - osHandle: *mut ::std::os::raw::c_void, - shHandleType: hipMemAllocationHandleType, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Maps an allocation handle to a reserved virtual address range.\n\n @param [in] ptr - address where the memory will be mapped.\n @param [in] size - size of the mapping.\n @param [in] offset - offset into the memory, currently must be zero.\n @param [in] handle - memory allocation to be mapped.\n @param [in] flags - currently unused, must be zero.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemMap( - ptr: *mut ::std::os::raw::c_void, - size: usize, - offset: usize, - handle: hipMemGenericAllocationHandle_t, - flags: ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Maps or unmaps subregions of sparse HIP arrays and sparse HIP mipmapped arrays.\n\n @param [in] mapInfoList - list of hipArrayMapInfo.\n @param [in] count - number of hipArrayMapInfo in mapInfoList.\n @param [in] stream - stream identifier for the stream to use for map or unmap operations.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemMapArrayAsync( - mapInfoList: *mut hipArrayMapInfo, - count: ::std::os::raw::c_uint, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Release a memory handle representing a memory allocation which was previously allocated through hipMemCreate.\n\n @param [in] handle - handle of the memory allocation.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemRelease(handle: hipMemGenericAllocationHandle_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Returns the allocation handle of the backing memory allocation given the address.\n\n @param [out] handle - handle representing addr.\n @param [in] addr - address to look up.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemRetainAllocationHandle( - handle: *mut hipMemGenericAllocationHandle_t, - addr: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Set the access flags for each location specified in desc for the given virtual address range.\n\n @param [in] ptr - starting address of the virtual address range.\n @param [in] size - size of the range.\n @param [in] desc - array of hipMemAccessDesc.\n @param [in] count - number of hipMemAccessDesc in 
desc.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemSetAccess( - ptr: *mut ::std::os::raw::c_void, - size: usize, - desc: *const hipMemAccessDesc, - count: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Unmap memory allocation of a given address range.\n\n @param [in] ptr - starting address of the range to unmap.\n @param [in] size - size of the virtual address range.\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported\n @warning : This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @note This API is implemented on Linux, under development on Windows."] - pub fn hipMemUnmap(ptr: *mut ::std::os::raw::c_void, size: usize) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Maps a graphics resource for access.\n\n @param [in] count - Number of resources to map.\n @param [in] resources - Pointer of resources to map.\n @param [in] stream - Stream for synchronization.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown, #hipErrorInvalidResourceHandle\n"] - pub fn hipGraphicsMapResources( - count: ::std::os::raw::c_int, - resources: *mut hipGraphicsResource_t, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Get an array through which to access a subresource of a mapped graphics resource.\n\n @param [out] array - Pointer of array through which a subresource of resource may be accessed.\n @param [in] resource - Mapped resource to access.\n @param [in] arrayIndex - Array index for the subresource to access.\n @param [in] mipLevel - Mipmap level for the subresource to access.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n @note In this API, the value of arrayIndex higher than zero is currently not supported.\n"] - pub fn hipGraphicsSubResourceGetMappedArray( - array: *mut hipArray_t, - resource: hipGraphicsResource_t, - arrayIndex: ::std::os::raw::c_uint, - mipLevel: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Gets device accessible address of a graphics resource.\n\n @param [out] devPtr - Pointer of device through which graphic resource may be accessed.\n @param [out] size - Size of the buffer accessible from devPtr.\n @param [in] resource - Mapped resource to access.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipGraphicsResourceGetMappedPointer( - devPtr: *mut *mut ::std::os::raw::c_void, - size: *mut usize, - resource: hipGraphicsResource_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Unmaps graphics resources.\n\n @param [in] count - Number of resources to unmap.\n @param [in] resources - Pointer of resources to unmap.\n @param [in] stream - Stream for synchronization.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown, #hipErrorContextIsDestroyed\n"] - pub fn hipGraphicsUnmapResources( - count: ::std::os::raw::c_int, - resources: *mut hipGraphicsResource_t, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Unregisters a graphics resource.\n\n @param [in] resource - Graphics resources to unregister.\n\n @returns #hipSuccess\n"] - pub fn hipGraphicsUnregisterResource(resource: 
hipGraphicsResource_t) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Create a surface object.\n\n @param [out] pSurfObject Pointer of surface object to be created.\n @param [in] pResDesc Pointer of suface object descriptor.\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] - pub fn hipCreateSurfaceObject( - pSurfObject: *mut hipSurfaceObject_t, - pResDesc: *const hipResourceDesc, - ) -> hipError_t; -} -extern "C" { - #[must_use] - #[doc = " @brief Destroy a surface object.\n\n @param [in] surfaceObject Surface object to be destroyed.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] - pub fn hipDestroySurfaceObject(surfaceObject: hipSurfaceObject_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy_spt( - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpyToSymbol_spt( - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpyFromSymbol_spt( - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy2D_spt( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy2DFromArray_spt( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: hipArray_const_t, - wOffset: usize, - hOffset: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy3D_spt(p: *const hipMemcpy3DParms) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemset_spt( - dst: *mut ::std::os::raw::c_void, - value: ::std::os::raw::c_int, - sizeBytes: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemsetAsync_spt( - dst: *mut ::std::os::raw::c_void, - value: ::std::os::raw::c_int, - sizeBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemset2D_spt( - dst: *mut ::std::os::raw::c_void, - pitch: usize, - value: ::std::os::raw::c_int, - width: usize, - height: usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemset2DAsync_spt( - dst: *mut ::std::os::raw::c_void, - pitch: usize, - value: ::std::os::raw::c_int, - width: usize, - height: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemset3DAsync_spt( - pitchedDevPtr: hipPitchedPtr, - value: ::std::os::raw::c_int, - extent: hipExtent, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemset3D_spt( - pitchedDevPtr: hipPitchedPtr, - value: ::std::os::raw::c_int, - extent: hipExtent, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpyAsync_spt( - dst: *mut ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy3DAsync_spt(p: *const hipMemcpy3DParms, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy2DAsync_spt( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - 
width: usize, - height: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpyFromSymbolAsync_spt( - dst: *mut ::std::os::raw::c_void, - symbol: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpyToSymbolAsync_spt( - symbol: *const ::std::os::raw::c_void, - src: *const ::std::os::raw::c_void, - sizeBytes: usize, - offset: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpyFromArray_spt( - dst: *mut ::std::os::raw::c_void, - src: hipArray_const_t, - wOffsetSrc: usize, - hOffset: usize, - count: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy2DToArray_spt( - dst: hipArray_t, - wOffset: usize, - hOffset: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy2DFromArrayAsync_spt( - dst: *mut ::std::os::raw::c_void, - dpitch: usize, - src: hipArray_const_t, - wOffsetSrc: usize, - hOffsetSrc: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipMemcpy2DToArrayAsync_spt( - dst: hipArray_t, - wOffset: usize, - hOffset: usize, - src: *const ::std::os::raw::c_void, - spitch: usize, - width: usize, - height: usize, - kind: hipMemcpyKind, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamQuery_spt(stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamSynchronize_spt(stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamGetPriority_spt( - stream: hipStream_t, - priority: *mut ::std::os::raw::c_int, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamWaitEvent_spt( - stream: hipStream_t, - event: hipEvent_t, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamGetFlags_spt( - stream: hipStream_t, - flags: *mut ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamAddCallback_spt( - stream: hipStream_t, - callback: hipStreamCallback_t, - userData: *mut ::std::os::raw::c_void, - flags: ::std::os::raw::c_uint, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipEventRecord_spt(event: hipEvent_t, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipLaunchCooperativeKernel_spt( - f: *const ::std::os::raw::c_void, - gridDim: dim3, - blockDim: dim3, - kernelParams: *mut *mut ::std::os::raw::c_void, - sharedMemBytes: u32, - hStream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipLaunchKernel_spt( - function_address: *const ::std::os::raw::c_void, - numBlocks: dim3, - dimBlocks: dim3, - args: *mut *mut ::std::os::raw::c_void, - sharedMemBytes: usize, - stream: hipStream_t, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipGraphLaunch_spt(graphExec: hipGraphExec_t, stream: hipStream_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamBeginCapture_spt(stream: hipStream_t, mode: hipStreamCaptureMode) - -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamEndCapture_spt(stream: hipStream_t, pGraph: *mut hipGraph_t) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn 
hipStreamIsCapturing_spt( - stream: hipStream_t, - pCaptureStatus: *mut hipStreamCaptureStatus, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamGetCaptureInfo_spt( - stream: hipStream_t, - pCaptureStatus: *mut hipStreamCaptureStatus, - pId: *mut ::std::os::raw::c_ulonglong, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipStreamGetCaptureInfo_v2_spt( - stream: hipStream_t, - captureStatus_out: *mut hipStreamCaptureStatus, - id_out: *mut ::std::os::raw::c_ulonglong, - graph_out: *mut hipGraph_t, - dependencies_out: *mut *const hipGraphNode_t, - numDependencies_out: *mut usize, - ) -> hipError_t; -} -extern "C" { - #[must_use] - pub fn hipLaunchHostFunc_spt( - stream: hipStream_t, - fn_: hipHostFn_t, - userData: *mut ::std::os::raw::c_void, - ) -> hipError_t; -} diff --git a/ext/hip_runtime-sys/src/lib.rs b/ext/hip_runtime-sys/src/lib.rs index fe257566..4aad7e6e 100644 --- a/ext/hip_runtime-sys/src/lib.rs +++ b/ext/hip_runtime-sys/src/lib.rs @@ -1,3 +1,12668 @@ +// Generated automatically by zluda_bindgen +// DO NOT EDIT MANUALLY #![allow(warnings)] -pub mod hip_runtime_api; -pub use hip_runtime_api::*; \ No newline at end of file +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + byte & mask == mask + } + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), + ); + let mut val = 0; + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + val + } + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!( + (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), + ); + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} +pub const hipTextureType1D: u32 = 1; +pub const hipTextureType2D: u32 = 2; +pub const hipTextureType3D: u32 = 3; +pub const hipTextureTypeCubemap: u32 = 12; +pub const hipTextureType1DLayered: u32 = 241; +pub const hipTextureType2DLayered: u32 = 242; +pub 
const hipTextureTypeCubemapLayered: u32 = 252; +pub const hipIpcMemLazyEnablePeerAccess: u32 = 1; +pub const hipStreamDefault: u32 = 0; +pub const hipStreamNonBlocking: u32 = 1; +pub const hipEventDefault: u32 = 0; +pub const hipEventBlockingSync: u32 = 1; +pub const hipEventDisableTiming: u32 = 2; +pub const hipEventInterprocess: u32 = 4; +pub const hipEventDisableSystemFence: u32 = 536870912; +pub const hipEventReleaseToDevice: u32 = 1073741824; +pub const hipEventReleaseToSystem: u32 = 2147483648; +pub const hipHostMallocDefault: u32 = 0; +pub const hipHostMallocPortable: u32 = 1; +pub const hipHostMallocMapped: u32 = 2; +pub const hipHostMallocWriteCombined: u32 = 4; +pub const hipHostMallocNumaUser: u32 = 536870912; +pub const hipHostMallocCoherent: u32 = 1073741824; +pub const hipHostMallocNonCoherent: u32 = 2147483648; +pub const hipMemAttachGlobal: u32 = 1; +pub const hipMemAttachHost: u32 = 2; +pub const hipMemAttachSingle: u32 = 4; +pub const hipDeviceMallocDefault: u32 = 0; +pub const hipDeviceMallocFinegrained: u32 = 1; +pub const hipMallocSignalMemory: u32 = 2; +pub const hipDeviceMallocUncached: u32 = 3; +pub const hipDeviceMallocContiguous: u32 = 4; +pub const hipHostRegisterDefault: u32 = 0; +pub const hipHostRegisterPortable: u32 = 1; +pub const hipHostRegisterMapped: u32 = 2; +pub const hipHostRegisterIoMemory: u32 = 4; +pub const hipHostRegisterReadOnly: u32 = 8; +pub const hipExtHostRegisterCoarseGrained: u32 = 8; +pub const hipDeviceScheduleAuto: u32 = 0; +pub const hipDeviceScheduleSpin: u32 = 1; +pub const hipDeviceScheduleYield: u32 = 2; +pub const hipDeviceScheduleBlockingSync: u32 = 4; +pub const hipDeviceScheduleMask: u32 = 7; +pub const hipDeviceMapHost: u32 = 8; +pub const hipDeviceLmemResizeToMax: u32 = 16; +pub const hipArrayDefault: u32 = 0; +pub const hipArrayLayered: u32 = 1; +pub const hipArraySurfaceLoadStore: u32 = 2; +pub const hipArrayCubemap: u32 = 4; +pub const hipArrayTextureGather: u32 = 8; +pub const hipOccupancyDefault: u32 = 0; +pub const hipOccupancyDisableCachingOverride: u32 = 1; +pub const hipCooperativeLaunchMultiDeviceNoPreSync: u32 = 1; +pub const hipCooperativeLaunchMultiDeviceNoPostSync: u32 = 2; +pub const hipExtAnyOrderLaunch: u32 = 1; +pub const hipStreamWaitValueGte: u32 = 0; +pub const hipStreamWaitValueEq: u32 = 1; +pub const hipStreamWaitValueAnd: u32 = 2; +pub const hipStreamWaitValueNor: u32 = 3; +pub const hipExternalMemoryDedicated: u32 = 1; +pub const hipGraphKernelNodePortDefault: u32 = 0; +pub const hipGraphKernelNodePortLaunchCompletion: u32 = 2; +pub const hipGraphKernelNodePortProgrammatic: u32 = 1; +#[doc = " @defgroup GlobalDefs Global enum and defines\n @{\n\n/\n/**\n hipDeviceArch_t\n"] +#[repr(C)] +#[repr(align(4))] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipDeviceArch_t { + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 3usize]>, + pub __bindgen_padding_0: u8, +} +impl hipDeviceArch_t { + #[inline] + pub fn hasGlobalInt32Atomics(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasGlobalInt32Atomics(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasGlobalFloatAtomicExch(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasGlobalFloatAtomicExch(&mut self, val: 
::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasSharedInt32Atomics(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasSharedInt32Atomics(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasSharedFloatAtomicExch(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasSharedFloatAtomicExch(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasFloatAtomicAdd(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasFloatAtomicAdd(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasGlobalInt64Atomics(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasGlobalInt64Atomics(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasSharedInt64Atomics(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasSharedInt64Atomics(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(6usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasDoubles(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasDoubles(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(7usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasWarpVote(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasWarpVote(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(8usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasWarpBallot(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasWarpBallot(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(9usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasWarpShuffle(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasWarpShuffle(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(10usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasFunnelShift(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasFunnelShift(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); 
+ self._bitfield_1.set(11usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasThreadFenceSystem(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasThreadFenceSystem(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(12usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasSyncThreadsExt(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasSyncThreadsExt(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(13usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasSurfaceFuncs(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasSurfaceFuncs(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(14usize, 1u8, val as u64) + } + } + #[inline] + pub fn has3dGrid(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u32) } + } + #[inline] + pub fn set_has3dGrid(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(15usize, 1u8, val as u64) + } + } + #[inline] + pub fn hasDynamicParallelism(&self) -> ::core::ffi::c_uint { + unsafe { ::core::mem::transmute(self._bitfield_1.get(16usize, 1u8) as u32) } + } + #[inline] + pub fn set_hasDynamicParallelism(&mut self, val: ::core::ffi::c_uint) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(16usize, 1u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + hasGlobalInt32Atomics: ::core::ffi::c_uint, + hasGlobalFloatAtomicExch: ::core::ffi::c_uint, + hasSharedInt32Atomics: ::core::ffi::c_uint, + hasSharedFloatAtomicExch: ::core::ffi::c_uint, + hasFloatAtomicAdd: ::core::ffi::c_uint, + hasGlobalInt64Atomics: ::core::ffi::c_uint, + hasSharedInt64Atomics: ::core::ffi::c_uint, + hasDoubles: ::core::ffi::c_uint, + hasWarpVote: ::core::ffi::c_uint, + hasWarpBallot: ::core::ffi::c_uint, + hasWarpShuffle: ::core::ffi::c_uint, + hasFunnelShift: ::core::ffi::c_uint, + hasThreadFenceSystem: ::core::ffi::c_uint, + hasSyncThreadsExt: ::core::ffi::c_uint, + hasSurfaceFuncs: ::core::ffi::c_uint, + has3dGrid: ::core::ffi::c_uint, + hasDynamicParallelism: ::core::ffi::c_uint, + ) -> __BindgenBitfieldUnit<[u8; 3usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); + __bindgen_bitfield_unit + .set( + 0usize, + 1u8, + { + let hasGlobalInt32Atomics: u32 = unsafe { + ::core::mem::transmute(hasGlobalInt32Atomics) + }; + hasGlobalInt32Atomics as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 1usize, + 1u8, + { + let hasGlobalFloatAtomicExch: u32 = unsafe { + ::core::mem::transmute(hasGlobalFloatAtomicExch) + }; + hasGlobalFloatAtomicExch as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 2usize, + 1u8, + { + let hasSharedInt32Atomics: u32 = unsafe { + ::core::mem::transmute(hasSharedInt32Atomics) + }; + hasSharedInt32Atomics as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 3usize, + 1u8, + { + let hasSharedFloatAtomicExch: u32 = unsafe { + ::core::mem::transmute(hasSharedFloatAtomicExch) + }; + hasSharedFloatAtomicExch as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 4usize, + 1u8, + { + let 
hasFloatAtomicAdd: u32 = unsafe { + ::core::mem::transmute(hasFloatAtomicAdd) + }; + hasFloatAtomicAdd as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 5usize, + 1u8, + { + let hasGlobalInt64Atomics: u32 = unsafe { + ::core::mem::transmute(hasGlobalInt64Atomics) + }; + hasGlobalInt64Atomics as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 6usize, + 1u8, + { + let hasSharedInt64Atomics: u32 = unsafe { + ::core::mem::transmute(hasSharedInt64Atomics) + }; + hasSharedInt64Atomics as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 7usize, + 1u8, + { + let hasDoubles: u32 = unsafe { ::core::mem::transmute(hasDoubles) }; + hasDoubles as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 8usize, + 1u8, + { + let hasWarpVote: u32 = unsafe { + ::core::mem::transmute(hasWarpVote) + }; + hasWarpVote as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 9usize, + 1u8, + { + let hasWarpBallot: u32 = unsafe { + ::core::mem::transmute(hasWarpBallot) + }; + hasWarpBallot as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 10usize, + 1u8, + { + let hasWarpShuffle: u32 = unsafe { + ::core::mem::transmute(hasWarpShuffle) + }; + hasWarpShuffle as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 11usize, + 1u8, + { + let hasFunnelShift: u32 = unsafe { + ::core::mem::transmute(hasFunnelShift) + }; + hasFunnelShift as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 12usize, + 1u8, + { + let hasThreadFenceSystem: u32 = unsafe { + ::core::mem::transmute(hasThreadFenceSystem) + }; + hasThreadFenceSystem as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 13usize, + 1u8, + { + let hasSyncThreadsExt: u32 = unsafe { + ::core::mem::transmute(hasSyncThreadsExt) + }; + hasSyncThreadsExt as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 14usize, + 1u8, + { + let hasSurfaceFuncs: u32 = unsafe { + ::core::mem::transmute(hasSurfaceFuncs) + }; + hasSurfaceFuncs as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 15usize, + 1u8, + { + let has3dGrid: u32 = unsafe { ::core::mem::transmute(has3dGrid) }; + has3dGrid as u64 + }, + ); + __bindgen_bitfield_unit + .set( + 16usize, + 1u8, + { + let hasDynamicParallelism: u32 = unsafe { + ::core::mem::transmute(hasDynamicParallelism) + }; + hasDynamicParallelism as u64 + }, + ); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipUUID_t { + pub bytes: [::core::ffi::c_char; 16usize], +} +pub type hipUUID = hipUUID_t; +/** hipDeviceProp +*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipDeviceProp_tR0600 { + ///< Device name. + pub name: [::core::ffi::c_char; 256usize], + ///< UUID of a device + pub uuid: hipUUID, + ///< 8-byte unique identifier. Only valid on windows + pub luid: [::core::ffi::c_char; 8usize], + ///< LUID node mask + pub luidDeviceNodeMask: ::core::ffi::c_uint, + ///< Size of global memory region (in bytes). + pub totalGlobalMem: usize, + ///< Size of shared memory per block (in bytes). + pub sharedMemPerBlock: usize, + ///< Registers per block. + pub regsPerBlock: ::core::ffi::c_int, + ///< Warp size. + pub warpSize: ::core::ffi::c_int, + /**< Maximum pitch in bytes allowed by memory copies +< pitched memory*/ + pub memPitch: usize, + ///< Max work items per work group or workgroup max size. + pub maxThreadsPerBlock: ::core::ffi::c_int, + ///< Max number of threads in each dimension (XYZ) of a block. + pub maxThreadsDim: [::core::ffi::c_int; 3usize], + ///< Max grid dimensions (XYZ). 
+ pub maxGridSize: [::core::ffi::c_int; 3usize], + ///< Max clock frequency of the multiProcessors in khz. + pub clockRate: ::core::ffi::c_int, + /**< Size of shared constant memory region on the device +< (in bytes).*/ + pub totalConstMem: usize, + /**< Major compute capability. On HCC, this is an approximation and features may +< differ from CUDA CC. See the arch feature flags for portable ways to query +< feature caps.*/ + pub major: ::core::ffi::c_int, + /**< Minor compute capability. On HCC, this is an approximation and features may +< differ from CUDA CC. See the arch feature flags for portable ways to query +< feature caps.*/ + pub minor: ::core::ffi::c_int, + ///< Alignment requirement for textures + pub textureAlignment: usize, + ///< Pitch alignment requirement for texture references bound to + pub texturePitchAlignment: usize, + ///< Deprecated. Use asyncEngineCount instead + pub deviceOverlap: ::core::ffi::c_int, + ///< Number of multi-processors (compute units). + pub multiProcessorCount: ::core::ffi::c_int, + ///< Run time limit for kernels executed on the device + pub kernelExecTimeoutEnabled: ::core::ffi::c_int, + ///< APU vs dGPU + pub integrated: ::core::ffi::c_int, + ///< Check whether HIP can map host memory + pub canMapHostMemory: ::core::ffi::c_int, + ///< Compute mode. + pub computeMode: ::core::ffi::c_int, + ///< Maximum number of elements in 1D images + pub maxTexture1D: ::core::ffi::c_int, + ///< Maximum 1D mipmap texture size + pub maxTexture1DMipmap: ::core::ffi::c_int, + ///< Maximum size for 1D textures bound to linear memory + pub maxTexture1DLinear: ::core::ffi::c_int, + ///< Maximum dimensions (width, height) of 2D images, in image elements + pub maxTexture2D: [::core::ffi::c_int; 2usize], + ///< Maximum number of elements in 2D array mipmap of images + pub maxTexture2DMipmap: [::core::ffi::c_int; 2usize], + ///< Maximum 2D tex dimensions if tex are bound to pitched memory + pub maxTexture2DLinear: [::core::ffi::c_int; 3usize], + ///< Maximum 2D tex dimensions if gather has to be performed + pub maxTexture2DGather: [::core::ffi::c_int; 2usize], + /**< Maximum dimensions (width, height, depth) of 3D images, in image +< elements*/ + pub maxTexture3D: [::core::ffi::c_int; 3usize], + ///< Maximum alternate 3D texture dims + pub maxTexture3DAlt: [::core::ffi::c_int; 3usize], + ///< Maximum cubemap texture dims + pub maxTextureCubemap: ::core::ffi::c_int, + ///< Maximum number of elements in 1D array images + pub maxTexture1DLayered: [::core::ffi::c_int; 2usize], + ///< Maximum number of elements in 2D array images + pub maxTexture2DLayered: [::core::ffi::c_int; 3usize], + ///< Maximum cubemaps layered texture dims + pub maxTextureCubemapLayered: [::core::ffi::c_int; 2usize], + ///< Maximum 1D surface size + pub maxSurface1D: ::core::ffi::c_int, + ///< Maximum 2D surface size + pub maxSurface2D: [::core::ffi::c_int; 2usize], + ///< Maximum 3D surface size + pub maxSurface3D: [::core::ffi::c_int; 3usize], + ///< Maximum 1D layered surface size + pub maxSurface1DLayered: [::core::ffi::c_int; 2usize], + ///< Maximum 2D layared surface size + pub maxSurface2DLayered: [::core::ffi::c_int; 3usize], + ///< Maximum cubemap surface size + pub maxSurfaceCubemap: ::core::ffi::c_int, + ///< Maximum cubemap layered surface size + pub maxSurfaceCubemapLayered: [::core::ffi::c_int; 2usize], + ///< Alignment requirement for surface + pub surfaceAlignment: usize, + ///< Device can possibly execute multiple kernels concurrently. 
+ pub concurrentKernels: ::core::ffi::c_int, + ///< Device has ECC support enabled + pub ECCEnabled: ::core::ffi::c_int, + ///< PCI Bus ID. + pub pciBusID: ::core::ffi::c_int, + ///< PCI Device ID. + pub pciDeviceID: ::core::ffi::c_int, + ///< PCI Domain ID + pub pciDomainID: ::core::ffi::c_int, + ///< 1:If device is Tesla device using TCC driver, else 0 + pub tccDriver: ::core::ffi::c_int, + ///< Number of async engines + pub asyncEngineCount: ::core::ffi::c_int, + ///< Does device and host share unified address space + pub unifiedAddressing: ::core::ffi::c_int, + ///< Max global memory clock frequency in khz. + pub memoryClockRate: ::core::ffi::c_int, + ///< Global memory bus width in bits. + pub memoryBusWidth: ::core::ffi::c_int, + ///< L2 cache size. + pub l2CacheSize: ::core::ffi::c_int, + ///< Device's max L2 persisting lines in bytes + pub persistingL2CacheMaxSize: ::core::ffi::c_int, + ///< Maximum resident threads per multi-processor. + pub maxThreadsPerMultiProcessor: ::core::ffi::c_int, + ///< Device supports stream priority + pub streamPrioritiesSupported: ::core::ffi::c_int, + ///< Indicates globals are cached in L1 + pub globalL1CacheSupported: ::core::ffi::c_int, + ///< Locals are cahced in L1 + pub localL1CacheSupported: ::core::ffi::c_int, + ///< Amount of shared memory available per multiprocessor. + pub sharedMemPerMultiprocessor: usize, + ///< registers available per multiprocessor + pub regsPerMultiprocessor: ::core::ffi::c_int, + ///< Device supports allocating managed memory on this system + pub managedMemory: ::core::ffi::c_int, + ///< 1 if device is on a multi-GPU board, 0 if not. + pub isMultiGpuBoard: ::core::ffi::c_int, + ///< Unique identifier for a group of devices on same multiboard GPU + pub multiGpuBoardGroupID: ::core::ffi::c_int, + ///< Link between host and device supports native atomics + pub hostNativeAtomicSupported: ::core::ffi::c_int, + ///< Deprecated. CUDA only. 
+ pub singleToDoublePrecisionPerfRatio: ::core::ffi::c_int, + /**< Device supports coherently accessing pageable memory +< without calling hipHostRegister on it*/ + pub pageableMemoryAccess: ::core::ffi::c_int, + /**< Device can coherently access managed memory concurrently with +< the CPU*/ + pub concurrentManagedAccess: ::core::ffi::c_int, + ///< Is compute preemption supported on the device + pub computePreemptionSupported: ::core::ffi::c_int, + /**< Device can access host registered memory with same +< address as the host*/ + pub canUseHostPointerForRegisteredMem: ::core::ffi::c_int, + ///< HIP device supports cooperative launch + pub cooperativeLaunch: ::core::ffi::c_int, + /**< HIP device supports cooperative launch on multiple +< devices*/ + pub cooperativeMultiDeviceLaunch: ::core::ffi::c_int, + ///< Per device m ax shared mem per block usable by special opt in + pub sharedMemPerBlockOptin: usize, + /**< Device accesses pageable memory via the host's +< page tables*/ + pub pageableMemoryAccessUsesHostPageTables: ::core::ffi::c_int, + /**< Host can directly access managed memory on the device +< without migration*/ + pub directManagedMemAccessFromHost: ::core::ffi::c_int, + ///< Max number of blocks on CU + pub maxBlocksPerMultiProcessor: ::core::ffi::c_int, + ///< Max value of access policy window + pub accessPolicyMaxWindowSize: ::core::ffi::c_int, + ///< Shared memory reserved by driver per block + pub reservedSharedMemPerBlock: usize, + ///< Device supports hipHostRegister + pub hostRegisterSupported: ::core::ffi::c_int, + ///< Indicates if device supports sparse hip arrays + pub sparseHipArraySupported: ::core::ffi::c_int, + /**< Device supports using the hipHostRegisterReadOnly flag +< with hipHostRegistger*/ + pub hostRegisterReadOnlySupported: ::core::ffi::c_int, + ///< Indicates external timeline semaphore support + pub timelineSemaphoreInteropSupported: ::core::ffi::c_int, + ///< Indicates if device supports hipMallocAsync and hipMemPool APIs + pub memoryPoolsSupported: ::core::ffi::c_int, + ///< Indicates device support of RDMA APIs + pub gpuDirectRDMASupported: ::core::ffi::c_int, + /**< Bitmask to be interpreted according to +< hipFlushGPUDirectRDMAWritesOptions*/ + pub gpuDirectRDMAFlushWritesOptions: ::core::ffi::c_uint, + ///< value of hipGPUDirectRDMAWritesOrdering + pub gpuDirectRDMAWritesOrdering: ::core::ffi::c_int, + ///< Bitmask of handle types support with mempool based IPC + pub memoryPoolSupportedHandleTypes: ::core::ffi::c_uint, + /**< Device supports deferred mapping HIP arrays and HIP +< mipmapped arrays*/ + pub deferredMappingHipArraySupported: ::core::ffi::c_int, + ///< Device supports IPC events + pub ipcEventSupported: ::core::ffi::c_int, + ///< Device supports cluster launch + pub clusterLaunch: ::core::ffi::c_int, + ///< Indicates device supports unified function pointers + pub unifiedFunctionPointers: ::core::ffi::c_int, + ///< CUDA Reserved. + pub reserved: [::core::ffi::c_int; 63usize], + ///< Reserved for adding new entries for HIP/CUDA. + pub hipReserved: [::core::ffi::c_int; 32usize], + ///< AMD GCN Arch Name. HIP Only. + pub gcnArchName: [::core::ffi::c_char; 256usize], + ///< Maximum Shared Memory Per CU. HIP Only. + pub maxSharedMemoryPerMultiProcessor: usize, + /**< Frequency in khz of the timer used by the device-side "clock*" +< instructions. New for HIP.*/ + pub clockInstructionRate: ::core::ffi::c_int, + ///< Architectural feature flags. New for HIP. 
+ pub arch: hipDeviceArch_t, + ///< Addres of HDP_MEM_COHERENCY_FLUSH_CNTL register + pub hdpMemFlushCntl: *mut ::core::ffi::c_uint, + ///< Addres of HDP_REG_COHERENCY_FLUSH_CNTL register + pub hdpRegFlushCntl: *mut ::core::ffi::c_uint, + /**< HIP device supports cooperative launch on +< multiple*/ + pub cooperativeMultiDeviceUnmatchedFunc: ::core::ffi::c_int, + /**< HIP device supports cooperative launch on +< multiple*/ + pub cooperativeMultiDeviceUnmatchedGridDim: ::core::ffi::c_int, + /**< HIP device supports cooperative launch on +< multiple*/ + pub cooperativeMultiDeviceUnmatchedBlockDim: ::core::ffi::c_int, + /**< HIP device supports cooperative launch on +< multiple*/ + pub cooperativeMultiDeviceUnmatchedSharedMem: ::core::ffi::c_int, + ///< 1: if it is a large PCI bar device, else 0 + pub isLargeBar: ::core::ffi::c_int, + ///< Revision of the GPU in this device + pub asicRevision: ::core::ffi::c_int, +} +impl hipMemoryType { + ///< Unregistered memory + pub const hipMemoryTypeUnregistered: hipMemoryType = hipMemoryType(0); +} +impl hipMemoryType { + ///< Memory is physically located on host + pub const hipMemoryTypeHost: hipMemoryType = hipMemoryType(1); +} +impl hipMemoryType { + /**< Memory is physically located on device. (see deviceId for +< specific device)*/ + pub const hipMemoryTypeDevice: hipMemoryType = hipMemoryType(2); +} +impl hipMemoryType { + /**< Managed memory, automaticallly managed by the unified +< memory system +< place holder for new values.*/ + pub const hipMemoryTypeManaged: hipMemoryType = hipMemoryType(3); +} +impl hipMemoryType { + /**< Array memory, physically located on device. (see deviceId for +< specific device)*/ + pub const hipMemoryTypeArray: hipMemoryType = hipMemoryType(10); +} +impl hipMemoryType { + ///< unified address space + pub const hipMemoryTypeUnified: hipMemoryType = hipMemoryType(11); +} +#[repr(transparent)] +/** hipMemoryType (for pointer attributes) + + @note hipMemoryType enum values are combination of cudaMemoryType and cuMemoryType and AMD specific enum values. +*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemoryType(pub ::core::ffi::c_uint); +/// Pointer attributes +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipPointerAttribute_t { + pub type_: hipMemoryType, + pub device: ::core::ffi::c_int, + pub devicePointer: *mut ::core::ffi::c_void, + pub hostPointer: *mut ::core::ffi::c_void, + pub isManaged: ::core::ffi::c_int, + pub allocationFlags: ::core::ffi::c_uint, +} +impl hipDeviceAttribute_t { + pub const hipDeviceAttributeCudaCompatibleBegin: hipDeviceAttribute_t = hipDeviceAttribute_t( + 0, + ); +} +impl hipDeviceAttribute_t { + ///< Whether ECC support is enabled. + pub const hipDeviceAttributeEccEnabled: hipDeviceAttribute_t = hipDeviceAttribute_t( + 0, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. The maximum size of the window policy in bytes. + pub const hipDeviceAttributeAccessPolicyMaxWindowSize: hipDeviceAttribute_t = hipDeviceAttribute_t( + 1, + ); +} +impl hipDeviceAttribute_t { + ///< Asynchronous engines number. 
+ pub const hipDeviceAttributeAsyncEngineCount: hipDeviceAttribute_t = hipDeviceAttribute_t( + 2, + ); +} +impl hipDeviceAttribute_t { + ///< Whether host memory can be mapped into device address space + pub const hipDeviceAttributeCanMapHostMemory: hipDeviceAttribute_t = hipDeviceAttribute_t( + 3, + ); +} +impl hipDeviceAttribute_t { + /**< Device can access host registered memory +< at the same virtual address as the CPU*/ + pub const hipDeviceAttributeCanUseHostPointerForRegisteredMem: hipDeviceAttribute_t = hipDeviceAttribute_t( + 4, + ); +} +impl hipDeviceAttribute_t { + ///< Peak clock frequency in kilohertz. + pub const hipDeviceAttributeClockRate: hipDeviceAttribute_t = hipDeviceAttribute_t( + 5, + ); +} +impl hipDeviceAttribute_t { + ///< Compute mode that device is currently in. + pub const hipDeviceAttributeComputeMode: hipDeviceAttribute_t = hipDeviceAttribute_t( + 6, + ); +} +impl hipDeviceAttribute_t { + ///< Device supports Compute Preemption. + pub const hipDeviceAttributeComputePreemptionSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 7, + ); +} +impl hipDeviceAttribute_t { + ///< Device can possibly execute multiple kernels concurrently. + pub const hipDeviceAttributeConcurrentKernels: hipDeviceAttribute_t = hipDeviceAttribute_t( + 8, + ); +} +impl hipDeviceAttribute_t { + ///< Device can coherently access managed memory concurrently with the CPU + pub const hipDeviceAttributeConcurrentManagedAccess: hipDeviceAttribute_t = hipDeviceAttribute_t( + 9, + ); +} +impl hipDeviceAttribute_t { + ///< Support cooperative launch + pub const hipDeviceAttributeCooperativeLaunch: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10, + ); +} +impl hipDeviceAttribute_t { + ///< Support cooperative launch on multiple devices + pub const hipDeviceAttributeCooperativeMultiDeviceLaunch: hipDeviceAttribute_t = hipDeviceAttribute_t( + 11, + ); +} +impl hipDeviceAttribute_t { + /**< Device can concurrently copy memory and execute a kernel. +< Deprecated. Use instead asyncEngineCount.*/ + pub const hipDeviceAttributeDeviceOverlap: hipDeviceAttribute_t = hipDeviceAttribute_t( + 12, + ); +} +impl hipDeviceAttribute_t { + /**< Host can directly access managed memory on +< the device without migration*/ + pub const hipDeviceAttributeDirectManagedMemAccessFromHost: hipDeviceAttribute_t = hipDeviceAttribute_t( + 13, + ); +} +impl hipDeviceAttribute_t { + ///< Device supports caching globals in L1 + pub const hipDeviceAttributeGlobalL1CacheSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 14, + ); +} +impl hipDeviceAttribute_t { + ///< Link between the device and the host supports native atomic operations + pub const hipDeviceAttributeHostNativeAtomicSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 15, + ); +} +impl hipDeviceAttribute_t { + ///< Device is integrated GPU + pub const hipDeviceAttributeIntegrated: hipDeviceAttribute_t = hipDeviceAttribute_t( + 16, + ); +} +impl hipDeviceAttribute_t { + ///< Multiple GPU devices. + pub const hipDeviceAttributeIsMultiGpuBoard: hipDeviceAttribute_t = hipDeviceAttribute_t( + 17, + ); +} +impl hipDeviceAttribute_t { + ///< Run time limit for kernels executed on the device + pub const hipDeviceAttributeKernelExecTimeout: hipDeviceAttribute_t = hipDeviceAttribute_t( + 18, + ); +} +impl hipDeviceAttribute_t { + ///< Size of L2 cache in bytes. 0 if the device doesn't have L2 cache. 
+ pub const hipDeviceAttributeL2CacheSize: hipDeviceAttribute_t = hipDeviceAttribute_t( + 19, + ); +} +impl hipDeviceAttribute_t { + ///< caching locals in L1 is supported + pub const hipDeviceAttributeLocalL1CacheSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 20, + ); +} +impl hipDeviceAttribute_t { + ///< 8-byte locally unique identifier in 8 bytes. Undefined on TCC and non-Windows platforms + pub const hipDeviceAttributeLuid: hipDeviceAttribute_t = hipDeviceAttribute_t(21); +} +impl hipDeviceAttribute_t { + ///< Luid device node mask. Undefined on TCC and non-Windows platforms + pub const hipDeviceAttributeLuidDeviceNodeMask: hipDeviceAttribute_t = hipDeviceAttribute_t( + 22, + ); +} +impl hipDeviceAttribute_t { + ///< Major compute capability version number. + pub const hipDeviceAttributeComputeCapabilityMajor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 23, + ); +} +impl hipDeviceAttribute_t { + ///< Device supports allocating managed memory on this system + pub const hipDeviceAttributeManagedMemory: hipDeviceAttribute_t = hipDeviceAttribute_t( + 24, + ); +} +impl hipDeviceAttribute_t { + ///< Max block size per multiprocessor + pub const hipDeviceAttributeMaxBlocksPerMultiProcessor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 25, + ); +} +impl hipDeviceAttribute_t { + ///< Max block size in width. + pub const hipDeviceAttributeMaxBlockDimX: hipDeviceAttribute_t = hipDeviceAttribute_t( + 26, + ); +} +impl hipDeviceAttribute_t { + ///< Max block size in height. + pub const hipDeviceAttributeMaxBlockDimY: hipDeviceAttribute_t = hipDeviceAttribute_t( + 27, + ); +} +impl hipDeviceAttribute_t { + ///< Max block size in depth. + pub const hipDeviceAttributeMaxBlockDimZ: hipDeviceAttribute_t = hipDeviceAttribute_t( + 28, + ); +} +impl hipDeviceAttribute_t { + ///< Max grid size in width. + pub const hipDeviceAttributeMaxGridDimX: hipDeviceAttribute_t = hipDeviceAttribute_t( + 29, + ); +} +impl hipDeviceAttribute_t { + ///< Max grid size in height. + pub const hipDeviceAttributeMaxGridDimY: hipDeviceAttribute_t = hipDeviceAttribute_t( + 30, + ); +} +impl hipDeviceAttribute_t { + ///< Max grid size in depth. + pub const hipDeviceAttributeMaxGridDimZ: hipDeviceAttribute_t = hipDeviceAttribute_t( + 31, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum size of 1D surface. + pub const hipDeviceAttributeMaxSurface1D: hipDeviceAttribute_t = hipDeviceAttribute_t( + 32, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. Maximum dimensions of 1D layered surface. + pub const hipDeviceAttributeMaxSurface1DLayered: hipDeviceAttribute_t = hipDeviceAttribute_t( + 33, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension (width, height) of 2D surface. + pub const hipDeviceAttributeMaxSurface2D: hipDeviceAttribute_t = hipDeviceAttribute_t( + 34, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. Maximum dimensions of 2D layered surface. + pub const hipDeviceAttributeMaxSurface2DLayered: hipDeviceAttribute_t = hipDeviceAttribute_t( + 35, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension (width, height, depth) of 3D surface. + pub const hipDeviceAttributeMaxSurface3D: hipDeviceAttribute_t = hipDeviceAttribute_t( + 36, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. Maximum dimensions of Cubemap surface. + pub const hipDeviceAttributeMaxSurfaceCubemap: hipDeviceAttribute_t = hipDeviceAttribute_t( + 37, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. Maximum dimension of Cubemap layered surface. 
+ pub const hipDeviceAttributeMaxSurfaceCubemapLayered: hipDeviceAttribute_t = hipDeviceAttribute_t( + 38, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum size of 1D texture. + pub const hipDeviceAttributeMaxTexture1DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t( + 39, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of 1D layered texture. + pub const hipDeviceAttributeMaxTexture1DLayered: hipDeviceAttribute_t = hipDeviceAttribute_t( + 40, + ); +} +impl hipDeviceAttribute_t { + /**< Maximum number of elements allocatable in a 1D linear texture. +< Use cudaDeviceGetTexture1DLinearMaxWidth() instead on Cuda.*/ + pub const hipDeviceAttributeMaxTexture1DLinear: hipDeviceAttribute_t = hipDeviceAttribute_t( + 41, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum size of 1D mipmapped texture. + pub const hipDeviceAttributeMaxTexture1DMipmap: hipDeviceAttribute_t = hipDeviceAttribute_t( + 42, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension width of 2D texture. + pub const hipDeviceAttributeMaxTexture2DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t( + 43, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension hight of 2D texture. + pub const hipDeviceAttributeMaxTexture2DHeight: hipDeviceAttribute_t = hipDeviceAttribute_t( + 44, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of 2D texture if gather operations performed. + pub const hipDeviceAttributeMaxTexture2DGather: hipDeviceAttribute_t = hipDeviceAttribute_t( + 45, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of 2D layered texture. + pub const hipDeviceAttributeMaxTexture2DLayered: hipDeviceAttribute_t = hipDeviceAttribute_t( + 46, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions (width, height, pitch) of 2D textures bound to pitched memory. + pub const hipDeviceAttributeMaxTexture2DLinear: hipDeviceAttribute_t = hipDeviceAttribute_t( + 47, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of 2D mipmapped texture. + pub const hipDeviceAttributeMaxTexture2DMipmap: hipDeviceAttribute_t = hipDeviceAttribute_t( + 48, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension width of 3D texture. + pub const hipDeviceAttributeMaxTexture3DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t( + 49, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension height of 3D texture. + pub const hipDeviceAttributeMaxTexture3DHeight: hipDeviceAttribute_t = hipDeviceAttribute_t( + 50, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension depth of 3D texture. + pub const hipDeviceAttributeMaxTexture3DDepth: hipDeviceAttribute_t = hipDeviceAttribute_t( + 51, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of alternate 3D texture. + pub const hipDeviceAttributeMaxTexture3DAlt: hipDeviceAttribute_t = hipDeviceAttribute_t( + 52, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of Cubemap texture + pub const hipDeviceAttributeMaxTextureCubemap: hipDeviceAttribute_t = hipDeviceAttribute_t( + 53, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimensions of Cubemap layered texture. + pub const hipDeviceAttributeMaxTextureCubemapLayered: hipDeviceAttribute_t = hipDeviceAttribute_t( + 54, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum dimension of a block + pub const hipDeviceAttributeMaxThreadsDim: hipDeviceAttribute_t = hipDeviceAttribute_t( + 55, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum number of threads per block. 
+ pub const hipDeviceAttributeMaxThreadsPerBlock: hipDeviceAttribute_t = hipDeviceAttribute_t( + 56, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum resident threads per multiprocessor. + pub const hipDeviceAttributeMaxThreadsPerMultiProcessor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 57, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum pitch in bytes allowed by memory copies + pub const hipDeviceAttributeMaxPitch: hipDeviceAttribute_t = hipDeviceAttribute_t( + 58, + ); +} +impl hipDeviceAttribute_t { + ///< Global memory bus width in bits. + pub const hipDeviceAttributeMemoryBusWidth: hipDeviceAttribute_t = hipDeviceAttribute_t( + 59, + ); +} +impl hipDeviceAttribute_t { + ///< Peak memory clock frequency in kilohertz. + pub const hipDeviceAttributeMemoryClockRate: hipDeviceAttribute_t = hipDeviceAttribute_t( + 60, + ); +} +impl hipDeviceAttribute_t { + ///< Minor compute capability version number. + pub const hipDeviceAttributeComputeCapabilityMinor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 61, + ); +} +impl hipDeviceAttribute_t { + ///< Unique ID of device group on the same multi-GPU board + pub const hipDeviceAttributeMultiGpuBoardGroupID: hipDeviceAttribute_t = hipDeviceAttribute_t( + 62, + ); +} +impl hipDeviceAttribute_t { + ///< Number of multiprocessors on the device. + pub const hipDeviceAttributeMultiprocessorCount: hipDeviceAttribute_t = hipDeviceAttribute_t( + 63, + ); +} +impl hipDeviceAttribute_t { + ///< Previously hipDeviceAttributeName + pub const hipDeviceAttributeUnused1: hipDeviceAttribute_t = hipDeviceAttribute_t(64); +} +impl hipDeviceAttribute_t { + /**< Device supports coherently accessing pageable memory +< without calling hipHostRegister on it*/ + pub const hipDeviceAttributePageableMemoryAccess: hipDeviceAttribute_t = hipDeviceAttribute_t( + 65, + ); +} +impl hipDeviceAttribute_t { + ///< Device accesses pageable memory via the host's page tables + pub const hipDeviceAttributePageableMemoryAccessUsesHostPageTables: hipDeviceAttribute_t = hipDeviceAttribute_t( + 66, + ); +} +impl hipDeviceAttribute_t { + ///< PCI Bus ID. + pub const hipDeviceAttributePciBusId: hipDeviceAttribute_t = hipDeviceAttribute_t( + 67, + ); +} +impl hipDeviceAttribute_t { + ///< PCI Device ID. + pub const hipDeviceAttributePciDeviceId: hipDeviceAttribute_t = hipDeviceAttribute_t( + 68, + ); +} +impl hipDeviceAttribute_t { + ///< PCI Domain ID. + pub const hipDeviceAttributePciDomainID: hipDeviceAttribute_t = hipDeviceAttribute_t( + 69, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum l2 persisting lines capacity in bytes + pub const hipDeviceAttributePersistingL2CacheMaxSize: hipDeviceAttribute_t = hipDeviceAttribute_t( + 70, + ); +} +impl hipDeviceAttribute_t { + /**< 32-bit registers available to a thread block. This number is shared +< by all thread blocks simultaneously resident on a multiprocessor.*/ + pub const hipDeviceAttributeMaxRegistersPerBlock: hipDeviceAttribute_t = hipDeviceAttribute_t( + 71, + ); +} +impl hipDeviceAttribute_t { + ///< 32-bit registers available per block. + pub const hipDeviceAttributeMaxRegistersPerMultiprocessor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 72, + ); +} +impl hipDeviceAttribute_t { + ///< Shared memory reserved by CUDA driver per block. + pub const hipDeviceAttributeReservedSharedMemPerBlock: hipDeviceAttribute_t = hipDeviceAttribute_t( + 73, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum shared memory available per block in bytes. 
+ pub const hipDeviceAttributeMaxSharedMemoryPerBlock: hipDeviceAttribute_t = hipDeviceAttribute_t( + 74, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum shared memory per block usable by special opt in. + pub const hipDeviceAttributeSharedMemPerBlockOptin: hipDeviceAttribute_t = hipDeviceAttribute_t( + 75, + ); +} +impl hipDeviceAttribute_t { + ///< Shared memory available per multiprocessor. + pub const hipDeviceAttributeSharedMemPerMultiprocessor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 76, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. Performance ratio of single precision to double precision. + pub const hipDeviceAttributeSingleToDoublePrecisionPerfRatio: hipDeviceAttribute_t = hipDeviceAttribute_t( + 77, + ); +} +impl hipDeviceAttribute_t { + ///< Whether to support stream priorities. + pub const hipDeviceAttributeStreamPrioritiesSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 78, + ); +} +impl hipDeviceAttribute_t { + ///< Alignment requirement for surfaces + pub const hipDeviceAttributeSurfaceAlignment: hipDeviceAttribute_t = hipDeviceAttribute_t( + 79, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. Whether device is a Tesla device using TCC driver + pub const hipDeviceAttributeTccDriver: hipDeviceAttribute_t = hipDeviceAttribute_t( + 80, + ); +} +impl hipDeviceAttribute_t { + ///< Alignment requirement for textures + pub const hipDeviceAttributeTextureAlignment: hipDeviceAttribute_t = hipDeviceAttribute_t( + 81, + ); +} +impl hipDeviceAttribute_t { + ///< Pitch alignment requirement for 2D texture references bound to pitched memory; + pub const hipDeviceAttributeTexturePitchAlignment: hipDeviceAttribute_t = hipDeviceAttribute_t( + 82, + ); +} +impl hipDeviceAttribute_t { + ///< Constant memory size in bytes. + pub const hipDeviceAttributeTotalConstantMemory: hipDeviceAttribute_t = hipDeviceAttribute_t( + 83, + ); +} +impl hipDeviceAttribute_t { + ///< Global memory available on devicice. + pub const hipDeviceAttributeTotalGlobalMem: hipDeviceAttribute_t = hipDeviceAttribute_t( + 84, + ); +} +impl hipDeviceAttribute_t { + ///< Cuda only. An unified address space shared with the host. + pub const hipDeviceAttributeUnifiedAddressing: hipDeviceAttribute_t = hipDeviceAttribute_t( + 85, + ); +} +impl hipDeviceAttribute_t { + ///< Previously hipDeviceAttributeUuid + pub const hipDeviceAttributeUnused2: hipDeviceAttribute_t = hipDeviceAttribute_t(86); +} +impl hipDeviceAttribute_t { + ///< Warp size in threads. 
+ pub const hipDeviceAttributeWarpSize: hipDeviceAttribute_t = hipDeviceAttribute_t( + 87, + ); +} +impl hipDeviceAttribute_t { + ///< Device supports HIP Stream Ordered Memory Allocator + pub const hipDeviceAttributeMemoryPoolsSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 88, + ); +} +impl hipDeviceAttribute_t { + ///< Device supports HIP virtual memory management + pub const hipDeviceAttributeVirtualMemoryManagementSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 89, + ); +} +impl hipDeviceAttribute_t { + ///< Can device support host memory registration via hipHostRegister + pub const hipDeviceAttributeHostRegisterSupported: hipDeviceAttribute_t = hipDeviceAttribute_t( + 90, + ); +} +impl hipDeviceAttribute_t { + ///< Supported handle mask for HIP Stream Ordered Memory Allocator + pub const hipDeviceAttributeMemoryPoolSupportedHandleTypes: hipDeviceAttribute_t = hipDeviceAttribute_t( + 91, + ); +} +impl hipDeviceAttribute_t { + pub const hipDeviceAttributeCudaCompatibleEnd: hipDeviceAttribute_t = hipDeviceAttribute_t( + 9999, + ); +} +impl hipDeviceAttribute_t { + pub const hipDeviceAttributeAmdSpecificBegin: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10000, + ); +} +impl hipDeviceAttribute_t { + ///< Frequency in khz of the timer used by the device-side "clock*" + pub const hipDeviceAttributeClockInstructionRate: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10000, + ); +} +impl hipDeviceAttribute_t { + ///< Previously hipDeviceAttributeArch + pub const hipDeviceAttributeUnused3: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10001, + ); +} +impl hipDeviceAttribute_t { + ///< Maximum Shared Memory PerMultiprocessor. + pub const hipDeviceAttributeMaxSharedMemoryPerMultiprocessor: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10002, + ); +} +impl hipDeviceAttribute_t { + ///< Previously hipDeviceAttributeGcnArch + pub const hipDeviceAttributeUnused4: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10003, + ); +} +impl hipDeviceAttribute_t { + ///< Previously hipDeviceAttributeGcnArchName + pub const hipDeviceAttributeUnused5: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10004, + ); +} +impl hipDeviceAttribute_t { + ///< Address of the HDP_MEM_COHERENCY_FLUSH_CNTL register + pub const hipDeviceAttributeHdpMemFlushCntl: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10005, + ); +} +impl hipDeviceAttribute_t { + ///< Address of the HDP_REG_COHERENCY_FLUSH_CNTL register + pub const hipDeviceAttributeHdpRegFlushCntl: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10006, + ); +} +impl hipDeviceAttribute_t { + /**< Supports cooperative launch on multiple +< devices with unmatched functions*/ + pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedFunc: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10007, + ); +} +impl hipDeviceAttribute_t { + /**< Supports cooperative launch on multiple +< devices with unmatched grid dimensions*/ + pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedGridDim: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10008, + ); +} +impl hipDeviceAttribute_t { + /**< Supports cooperative launch on multiple +< devices with unmatched block dimensions*/ + pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedBlockDim: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10009, + ); +} +impl hipDeviceAttribute_t { + /**< Supports cooperative launch on multiple +< devices with unmatched shared memories*/ + pub const hipDeviceAttributeCooperativeMultiDeviceUnmatchedSharedMem: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10010, + ); +} 
+impl hipDeviceAttribute_t { + ///< Whether it is LargeBar + pub const hipDeviceAttributeIsLargeBar: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10011, + ); +} +impl hipDeviceAttribute_t { + ///< Revision of the GPU in this device + pub const hipDeviceAttributeAsicRevision: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10012, + ); +} +impl hipDeviceAttribute_t { + /**< '1' if Device supports hipStreamWaitValue32() and +< hipStreamWaitValue64(), '0' otherwise.*/ + pub const hipDeviceAttributeCanUseStreamWaitValue: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10013, + ); +} +impl hipDeviceAttribute_t { + ///< '1' if Device supports image, '0' otherwise. + pub const hipDeviceAttributeImageSupport: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10014, + ); +} +impl hipDeviceAttribute_t { + /**< All available physical compute +< units for the device*/ + pub const hipDeviceAttributePhysicalMultiProcessorCount: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10015, + ); +} +impl hipDeviceAttribute_t { + ///< '1' if Device supports fine grain, '0' otherwise + pub const hipDeviceAttributeFineGrainSupport: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10016, + ); +} +impl hipDeviceAttribute_t { + ///< Constant frequency of wall clock in kilohertz. + pub const hipDeviceAttributeWallClockRate: hipDeviceAttribute_t = hipDeviceAttribute_t( + 10017, + ); +} +impl hipDeviceAttribute_t { + pub const hipDeviceAttributeAmdSpecificEnd: hipDeviceAttribute_t = hipDeviceAttribute_t( + 19999, + ); +} +impl hipDeviceAttribute_t { + pub const hipDeviceAttributeVendorSpecificBegin: hipDeviceAttribute_t = hipDeviceAttribute_t( + 20000, + ); +} +#[repr(transparent)] +/** hipDeviceAttribute_t + hipDeviceAttributeUnused number: 5*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipDeviceAttribute_t(pub ::core::ffi::c_uint); +impl hipDriverProcAddressQueryResult { + pub const HIP_GET_PROC_ADDRESS_SUCCESS: hipDriverProcAddressQueryResult = hipDriverProcAddressQueryResult( + 0, + ); +} +impl hipDriverProcAddressQueryResult { + pub const HIP_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND: hipDriverProcAddressQueryResult = hipDriverProcAddressQueryResult( + 1, + ); +} +impl hipDriverProcAddressQueryResult { + pub const HIP_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT: hipDriverProcAddressQueryResult = hipDriverProcAddressQueryResult( + 2, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipDriverProcAddressQueryResult(pub ::core::ffi::c_uint); +impl hipComputeMode { + pub const hipComputeModeDefault: hipComputeMode = hipComputeMode(0); +} +impl hipComputeMode { + pub const hipComputeModeExclusive: hipComputeMode = hipComputeMode(1); +} +impl hipComputeMode { + pub const hipComputeModeProhibited: hipComputeMode = hipComputeMode(2); +} +impl hipComputeMode { + pub const hipComputeModeExclusiveProcess: hipComputeMode = hipComputeMode(3); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipComputeMode(pub ::core::ffi::c_uint); +impl hipFlushGPUDirectRDMAWritesOptions { + pub const hipFlushGPUDirectRDMAWritesOptionHost: hipFlushGPUDirectRDMAWritesOptions = hipFlushGPUDirectRDMAWritesOptions( + 1, + ); +} +impl hipFlushGPUDirectRDMAWritesOptions { + pub const hipFlushGPUDirectRDMAWritesOptionMemOps: hipFlushGPUDirectRDMAWritesOptions = hipFlushGPUDirectRDMAWritesOptions( + 2, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipFlushGPUDirectRDMAWritesOptions(pub ::core::ffi::c_uint); +impl 
hipGPUDirectRDMAWritesOrdering { + pub const hipGPUDirectRDMAWritesOrderingNone: hipGPUDirectRDMAWritesOrdering = hipGPUDirectRDMAWritesOrdering( + 0, + ); +} +impl hipGPUDirectRDMAWritesOrdering { + pub const hipGPUDirectRDMAWritesOrderingOwner: hipGPUDirectRDMAWritesOrdering = hipGPUDirectRDMAWritesOrdering( + 100, + ); +} +impl hipGPUDirectRDMAWritesOrdering { + pub const hipGPUDirectRDMAWritesOrderingAllDevices: hipGPUDirectRDMAWritesOrdering = hipGPUDirectRDMAWritesOrdering( + 200, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGPUDirectRDMAWritesOrdering(pub ::core::ffi::c_uint); +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipDeviceptr_t(pub *mut ::core::ffi::c_void); +impl hipChannelFormatKind { + pub const hipChannelFormatKindSigned: hipChannelFormatKind = hipChannelFormatKind(0); +} +impl hipChannelFormatKind { + pub const hipChannelFormatKindUnsigned: hipChannelFormatKind = hipChannelFormatKind( + 1, + ); +} +impl hipChannelFormatKind { + pub const hipChannelFormatKindFloat: hipChannelFormatKind = hipChannelFormatKind(2); +} +impl hipChannelFormatKind { + pub const hipChannelFormatKindNone: hipChannelFormatKind = hipChannelFormatKind(3); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipChannelFormatKind(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipChannelFormatDesc { + pub x: ::core::ffi::c_int, + pub y: ::core::ffi::c_int, + pub z: ::core::ffi::c_int, + pub w: ::core::ffi::c_int, + pub f: hipChannelFormatKind, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct hipArray { + _unused: [u8; 0], +} +pub type hipArray_t = *mut hipArray; +pub type hipArray_const_t = *const hipArray; +impl hipArray_Format { + pub const HIP_AD_FORMAT_UNSIGNED_INT8: hipArray_Format = hipArray_Format(1); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_UNSIGNED_INT16: hipArray_Format = hipArray_Format(2); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_UNSIGNED_INT32: hipArray_Format = hipArray_Format(3); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_SIGNED_INT8: hipArray_Format = hipArray_Format(8); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_SIGNED_INT16: hipArray_Format = hipArray_Format(9); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_SIGNED_INT32: hipArray_Format = hipArray_Format(10); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_HALF: hipArray_Format = hipArray_Format(16); +} +impl hipArray_Format { + pub const HIP_AD_FORMAT_FLOAT: hipArray_Format = hipArray_Format(32); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipArray_Format(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_ARRAY_DESCRIPTOR { + pub Width: usize, + pub Height: usize, + pub Format: hipArray_Format, + pub NumChannels: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_ARRAY3D_DESCRIPTOR { + pub Width: usize, + pub Height: usize, + pub Depth: usize, + pub Format: hipArray_Format, + pub NumChannels: ::core::ffi::c_uint, + pub Flags: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hip_Memcpy2D { + pub srcXInBytes: usize, + pub srcY: usize, + pub srcMemoryType: hipMemoryType, + pub srcHost: *const ::core::ffi::c_void, + pub srcDevice: hipDeviceptr_t, + pub srcArray: hipArray_t, + pub srcPitch: 
usize, + pub dstXInBytes: usize, + pub dstY: usize, + pub dstMemoryType: hipMemoryType, + pub dstHost: *mut ::core::ffi::c_void, + pub dstDevice: hipDeviceptr_t, + pub dstArray: hipArray_t, + pub dstPitch: usize, + pub WidthInBytes: usize, + pub Height: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMipmappedArray { + pub data: *mut ::core::ffi::c_void, + pub desc: hipChannelFormatDesc, + pub type_: ::core::ffi::c_uint, + pub width: ::core::ffi::c_uint, + pub height: ::core::ffi::c_uint, + pub depth: ::core::ffi::c_uint, + pub min_mipmap_level: ::core::ffi::c_uint, + pub max_mipmap_level: ::core::ffi::c_uint, + pub flags: ::core::ffi::c_uint, + pub format: hipArray_Format, + pub num_channels: ::core::ffi::c_uint, +} +pub type hipMipmappedArray_t = *mut hipMipmappedArray; +pub type hipmipmappedArray = hipMipmappedArray_t; +pub type hipMipmappedArray_const_t = *const hipMipmappedArray; +impl hipResourceType { + pub const hipResourceTypeArray: hipResourceType = hipResourceType(0); +} +impl hipResourceType { + pub const hipResourceTypeMipmappedArray: hipResourceType = hipResourceType(1); +} +impl hipResourceType { + pub const hipResourceTypeLinear: hipResourceType = hipResourceType(2); +} +impl hipResourceType { + pub const hipResourceTypePitch2D: hipResourceType = hipResourceType(3); +} +#[repr(transparent)] +/// hip resource types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceType(pub ::core::ffi::c_uint); +impl HIPresourcetype_enum { + ///< Array resoure + pub const HIP_RESOURCE_TYPE_ARRAY: HIPresourcetype_enum = HIPresourcetype_enum(0); +} +impl HIPresourcetype_enum { + ///< Mipmapped array resource + pub const HIP_RESOURCE_TYPE_MIPMAPPED_ARRAY: HIPresourcetype_enum = HIPresourcetype_enum( + 1, + ); +} +impl HIPresourcetype_enum { + ///< Linear resource + pub const HIP_RESOURCE_TYPE_LINEAR: HIPresourcetype_enum = HIPresourcetype_enum(2); +} +impl HIPresourcetype_enum { + ///< Pitch 2D resource + pub const HIP_RESOURCE_TYPE_PITCH2D: HIPresourcetype_enum = HIPresourcetype_enum(3); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIPresourcetype_enum(pub ::core::ffi::c_uint); +pub use self::HIPresourcetype_enum as HIPresourcetype; +pub use self::HIPresourcetype_enum as hipResourcetype; +impl HIPaddress_mode_enum { + pub const HIP_TR_ADDRESS_MODE_WRAP: HIPaddress_mode_enum = HIPaddress_mode_enum(0); +} +impl HIPaddress_mode_enum { + pub const HIP_TR_ADDRESS_MODE_CLAMP: HIPaddress_mode_enum = HIPaddress_mode_enum(1); +} +impl HIPaddress_mode_enum { + pub const HIP_TR_ADDRESS_MODE_MIRROR: HIPaddress_mode_enum = HIPaddress_mode_enum(2); +} +impl HIPaddress_mode_enum { + pub const HIP_TR_ADDRESS_MODE_BORDER: HIPaddress_mode_enum = HIPaddress_mode_enum(3); +} +#[repr(transparent)] +/// hip address modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIPaddress_mode_enum(pub ::core::ffi::c_uint); +/// hip address modes +pub use self::HIPaddress_mode_enum as HIPaddress_mode; +impl HIPfilter_mode_enum { + pub const HIP_TR_FILTER_MODE_POINT: HIPfilter_mode_enum = HIPfilter_mode_enum(0); +} +impl HIPfilter_mode_enum { + pub const HIP_TR_FILTER_MODE_LINEAR: HIPfilter_mode_enum = HIPfilter_mode_enum(1); +} +#[repr(transparent)] +/// hip filter modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIPfilter_mode_enum(pub ::core::ffi::c_uint); +/// hip filter modes +pub use self::HIPfilter_mode_enum as HIPfilter_mode; +/// Texture descriptor +#[repr(C)] 
+#[derive(Debug, Copy, Clone, PartialEq)] +pub struct HIP_TEXTURE_DESC_st { + ///< Address modes + pub addressMode: [HIPaddress_mode; 3usize], + ///< Filter mode + pub filterMode: HIPfilter_mode, + ///< Flags + pub flags: ::core::ffi::c_uint, + ///< Maximum anisotropy ratio + pub maxAnisotropy: ::core::ffi::c_uint, + ///< Mipmap filter mode + pub mipmapFilterMode: HIPfilter_mode, + ///< Mipmap level bias + pub mipmapLevelBias: f32, + ///< Mipmap minimum level clamp + pub minMipmapLevelClamp: f32, + ///< Mipmap maximum level clamp + pub maxMipmapLevelClamp: f32, + ///< Border Color + pub borderColor: [f32; 4usize], + pub reserved: [::core::ffi::c_int; 12usize], +} +/// Texture descriptor +pub type HIP_TEXTURE_DESC = HIP_TEXTURE_DESC_st; +impl hipResourceViewFormat { + pub const hipResViewFormatNone: hipResourceViewFormat = hipResourceViewFormat(0); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedChar1: hipResourceViewFormat = hipResourceViewFormat( + 1, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedChar2: hipResourceViewFormat = hipResourceViewFormat( + 2, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedChar4: hipResourceViewFormat = hipResourceViewFormat( + 3, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedChar1: hipResourceViewFormat = hipResourceViewFormat( + 4, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedChar2: hipResourceViewFormat = hipResourceViewFormat( + 5, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedChar4: hipResourceViewFormat = hipResourceViewFormat( + 6, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedShort1: hipResourceViewFormat = hipResourceViewFormat( + 7, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedShort2: hipResourceViewFormat = hipResourceViewFormat( + 8, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedShort4: hipResourceViewFormat = hipResourceViewFormat( + 9, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedShort1: hipResourceViewFormat = hipResourceViewFormat( + 10, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedShort2: hipResourceViewFormat = hipResourceViewFormat( + 11, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedShort4: hipResourceViewFormat = hipResourceViewFormat( + 12, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedInt1: hipResourceViewFormat = hipResourceViewFormat( + 13, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedInt2: hipResourceViewFormat = hipResourceViewFormat( + 14, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedInt4: hipResourceViewFormat = hipResourceViewFormat( + 15, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedInt1: hipResourceViewFormat = hipResourceViewFormat( + 16, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedInt2: hipResourceViewFormat = hipResourceViewFormat( + 17, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedInt4: hipResourceViewFormat = hipResourceViewFormat( + 18, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatHalf1: hipResourceViewFormat = hipResourceViewFormat(19); +} +impl hipResourceViewFormat { + pub const hipResViewFormatHalf2: hipResourceViewFormat = hipResourceViewFormat(20); +} +impl hipResourceViewFormat { + pub const hipResViewFormatHalf4: 
hipResourceViewFormat = hipResourceViewFormat(21); +} +impl hipResourceViewFormat { + pub const hipResViewFormatFloat1: hipResourceViewFormat = hipResourceViewFormat(22); +} +impl hipResourceViewFormat { + pub const hipResViewFormatFloat2: hipResourceViewFormat = hipResourceViewFormat(23); +} +impl hipResourceViewFormat { + pub const hipResViewFormatFloat4: hipResourceViewFormat = hipResourceViewFormat(24); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed1: hipResourceViewFormat = hipResourceViewFormat( + 25, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed2: hipResourceViewFormat = hipResourceViewFormat( + 26, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed3: hipResourceViewFormat = hipResourceViewFormat( + 27, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed4: hipResourceViewFormat = hipResourceViewFormat( + 28, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedBlockCompressed4: hipResourceViewFormat = hipResourceViewFormat( + 29, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed5: hipResourceViewFormat = hipResourceViewFormat( + 30, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedBlockCompressed5: hipResourceViewFormat = hipResourceViewFormat( + 31, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed6H: hipResourceViewFormat = hipResourceViewFormat( + 32, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatSignedBlockCompressed6H: hipResourceViewFormat = hipResourceViewFormat( + 33, + ); +} +impl hipResourceViewFormat { + pub const hipResViewFormatUnsignedBlockCompressed7: hipResourceViewFormat = hipResourceViewFormat( + 34, + ); +} +#[repr(transparent)] +/// hip texture resource view formats +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceViewFormat(pub ::core::ffi::c_uint); +impl HIPresourceViewFormat_enum { + ///< No resource view format (use underlying resource format) + pub const HIP_RES_VIEW_FORMAT_NONE: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 0, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel unsigned 8-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_1X8: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 1, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel unsigned 8-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_2X8: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 2, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel unsigned 8-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_4X8: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 3, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel signed 8-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_1X8: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 4, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel signed 8-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_2X8: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 5, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel signed 8-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_4X8: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 6, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel unsigned 16-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_1X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 7, + ); +} 
+impl HIPresourceViewFormat_enum { + ///< 2 channel unsigned 16-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_2X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 8, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel unsigned 16-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_4X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 9, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel signed 16-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_1X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 10, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel signed 16-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_2X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 11, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel signed 16-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_4X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 12, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel unsigned 32-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_1X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 13, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel unsigned 32-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_2X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 14, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel unsigned 32-bit integers + pub const HIP_RES_VIEW_FORMAT_UINT_4X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 15, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel signed 32-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_1X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 16, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel signed 32-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_2X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 17, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel signed 32-bit integers + pub const HIP_RES_VIEW_FORMAT_SINT_4X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 18, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel 16-bit floating point + pub const HIP_RES_VIEW_FORMAT_FLOAT_1X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 19, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel 16-bit floating point + pub const HIP_RES_VIEW_FORMAT_FLOAT_2X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 20, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel 16-bit floating point + pub const HIP_RES_VIEW_FORMAT_FLOAT_4X16: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 21, + ); +} +impl HIPresourceViewFormat_enum { + ///< 1 channel 32-bit floating point + pub const HIP_RES_VIEW_FORMAT_FLOAT_1X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 22, + ); +} +impl HIPresourceViewFormat_enum { + ///< 2 channel 32-bit floating point + pub const HIP_RES_VIEW_FORMAT_FLOAT_2X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 23, + ); +} +impl HIPresourceViewFormat_enum { + ///< 4 channel 32-bit floating point + pub const HIP_RES_VIEW_FORMAT_FLOAT_4X32: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 24, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 1 + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC1: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 25, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 2 + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC2: HIPresourceViewFormat_enum = 
HIPresourceViewFormat_enum( + 26, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 3 + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC3: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 27, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 4 unsigned + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC4: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 28, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 4 signed + pub const HIP_RES_VIEW_FORMAT_SIGNED_BC4: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 29, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 5 unsigned + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC5: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 30, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 5 signed + pub const HIP_RES_VIEW_FORMAT_SIGNED_BC5: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 31, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 6 unsigned half-float + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC6H: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 32, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 6 signed half-float + pub const HIP_RES_VIEW_FORMAT_SIGNED_BC6H: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 33, + ); +} +impl HIPresourceViewFormat_enum { + ///< Block compressed 7 + pub const HIP_RES_VIEW_FORMAT_UNSIGNED_BC7: HIPresourceViewFormat_enum = HIPresourceViewFormat_enum( + 34, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIPresourceViewFormat_enum(pub ::core::ffi::c_uint); +pub use self::HIPresourceViewFormat_enum as HIPresourceViewFormat; +/// HIP resource descriptor +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipResourceDesc { + pub resType: hipResourceType, + pub res: hipResourceDesc__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipResourceDesc__bindgen_ty_1 { + pub array: hipResourceDesc__bindgen_ty_1__bindgen_ty_1, + pub mipmap: hipResourceDesc__bindgen_ty_1__bindgen_ty_2, + pub linear: hipResourceDesc__bindgen_ty_1__bindgen_ty_3, + pub pitch2D: hipResourceDesc__bindgen_ty_1__bindgen_ty_4, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_1 { + pub array: hipArray_t, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_2 { + pub mipmap: hipMipmappedArray_t, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_3 { + pub devPtr: *mut ::core::ffi::c_void, + pub desc: hipChannelFormatDesc, + pub sizeInBytes: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceDesc__bindgen_ty_1__bindgen_ty_4 { + pub devPtr: *mut ::core::ffi::c_void, + pub desc: hipChannelFormatDesc, + pub width: usize, + pub height: usize, + pub pitchInBytes: usize, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct HIP_RESOURCE_DESC_st { + ///< Resource type + pub resType: HIPresourcetype, + pub res: HIP_RESOURCE_DESC_st__bindgen_ty_1, + ///< Flags (must be zero) + pub flags: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union HIP_RESOURCE_DESC_st__bindgen_ty_1 { + pub array: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1, + pub mipmap: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2, + pub linear: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3, + pub 
pitch2D: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4, + pub reserved: HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + ///< HIP array + pub hArray: hipArray_t, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { + ///< HIP mipmapped array + pub hMipmappedArray: hipMipmappedArray_t, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { + ///< Device pointer + pub devPtr: hipDeviceptr_t, + ///< Array format + pub format: hipArray_Format, + ///< Channels per array element + pub numChannels: ::core::ffi::c_uint, + ///< Size in bytes + pub sizeInBytes: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { + ///< Device pointer + pub devPtr: hipDeviceptr_t, + ///< Array format + pub format: hipArray_Format, + ///< Channels per array element + pub numChannels: ::core::ffi::c_uint, + ///< Width of the array in elements + pub width: usize, + ///< Height of the array in elements + pub height: usize, + ///< Pitch between two rows in bytes + pub pitchInBytes: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 { + pub reserved: [::core::ffi::c_int; 32usize], +} +pub type HIP_RESOURCE_DESC = HIP_RESOURCE_DESC_st; +/// hip resource view descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipResourceViewDesc { + pub format: hipResourceViewFormat, + pub width: usize, + pub height: usize, + pub depth: usize, + pub firstMipmapLevel: ::core::ffi::c_uint, + pub lastMipmapLevel: ::core::ffi::c_uint, + pub firstLayer: ::core::ffi::c_uint, + pub lastLayer: ::core::ffi::c_uint, +} +/// Resource view descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_RESOURCE_VIEW_DESC_st { + ///< Resource view format + pub format: HIPresourceViewFormat, + ///< Width of the resource view + pub width: usize, + ///< Height of the resource view + pub height: usize, + ///< Depth of the resource view + pub depth: usize, + ///< First defined mipmap level + pub firstMipmapLevel: ::core::ffi::c_uint, + ///< Last defined mipmap level + pub lastMipmapLevel: ::core::ffi::c_uint, + ///< First layer index + pub firstLayer: ::core::ffi::c_uint, + ///< Last layer index + pub lastLayer: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +/// Resource view descriptor +pub type HIP_RESOURCE_VIEW_DESC = HIP_RESOURCE_VIEW_DESC_st; +impl hipMemcpyKind { + ///< Host-to-Host Copy + pub const hipMemcpyHostToHost: hipMemcpyKind = hipMemcpyKind(0); +} +impl hipMemcpyKind { + ///< Host-to-Device Copy + pub const hipMemcpyHostToDevice: hipMemcpyKind = hipMemcpyKind(1); +} +impl hipMemcpyKind { + ///< Device-to-Host Copy + pub const hipMemcpyDeviceToHost: hipMemcpyKind = hipMemcpyKind(2); +} +impl hipMemcpyKind { + ///< Device-to-Device Copy + pub const hipMemcpyDeviceToDevice: hipMemcpyKind = hipMemcpyKind(3); +} +impl hipMemcpyKind { + /**< Runtime will automatically determine + hipChannelFormatDesc; +} +/// An opaque value that represents a hip texture object +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __hip_texture { + _unused: [u8; 0], +} +pub type hipTextureObject_t = *mut __hip_texture; +impl 
hipTextureAddressMode { + pub const hipAddressModeWrap: hipTextureAddressMode = hipTextureAddressMode(0); +} +impl hipTextureAddressMode { + pub const hipAddressModeClamp: hipTextureAddressMode = hipTextureAddressMode(1); +} +impl hipTextureAddressMode { + pub const hipAddressModeMirror: hipTextureAddressMode = hipTextureAddressMode(2); +} +impl hipTextureAddressMode { + pub const hipAddressModeBorder: hipTextureAddressMode = hipTextureAddressMode(3); +} +#[repr(transparent)] +/// hip texture address modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipTextureAddressMode(pub ::core::ffi::c_uint); +impl hipTextureFilterMode { + pub const hipFilterModePoint: hipTextureFilterMode = hipTextureFilterMode(0); +} +impl hipTextureFilterMode { + pub const hipFilterModeLinear: hipTextureFilterMode = hipTextureFilterMode(1); +} +#[repr(transparent)] +/// hip texture filter modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipTextureFilterMode(pub ::core::ffi::c_uint); +impl hipTextureReadMode { + pub const hipReadModeElementType: hipTextureReadMode = hipTextureReadMode(0); +} +impl hipTextureReadMode { + pub const hipReadModeNormalizedFloat: hipTextureReadMode = hipTextureReadMode(1); +} +#[repr(transparent)] +/// hip texture read modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipTextureReadMode(pub ::core::ffi::c_uint); +/// hip texture reference +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct textureReference { + pub normalized: ::core::ffi::c_int, + pub readMode: hipTextureReadMode, + pub filterMode: hipTextureFilterMode, + pub addressMode: [hipTextureAddressMode; 3usize], + pub channelDesc: hipChannelFormatDesc, + pub sRGB: ::core::ffi::c_int, + pub maxAnisotropy: ::core::ffi::c_uint, + pub mipmapFilterMode: hipTextureFilterMode, + pub mipmapLevelBias: f32, + pub minMipmapLevelClamp: f32, + pub maxMipmapLevelClamp: f32, + pub textureObject: hipTextureObject_t, + pub numChannels: ::core::ffi::c_int, + pub format: hipArray_Format, +} +/// hip texture descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct hipTextureDesc { + pub addressMode: [hipTextureAddressMode; 3usize], + pub filterMode: hipTextureFilterMode, + pub readMode: hipTextureReadMode, + pub sRGB: ::core::ffi::c_int, + pub borderColor: [f32; 4usize], + pub normalizedCoords: ::core::ffi::c_int, + pub maxAnisotropy: ::core::ffi::c_uint, + pub mipmapFilterMode: hipTextureFilterMode, + pub mipmapLevelBias: f32, + pub minMipmapLevelClamp: f32, + pub maxMipmapLevelClamp: f32, +} +/// An opaque value that represents a hip surface object +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __hip_surface { + _unused: [u8; 0], +} +pub type hipSurfaceObject_t = *mut __hip_surface; +impl hipSurfaceBoundaryMode { + pub const hipBoundaryModeZero: hipSurfaceBoundaryMode = hipSurfaceBoundaryMode(0); +} +impl hipSurfaceBoundaryMode { + pub const hipBoundaryModeTrap: hipSurfaceBoundaryMode = hipSurfaceBoundaryMode(1); +} +impl hipSurfaceBoundaryMode { + pub const hipBoundaryModeClamp: hipSurfaceBoundaryMode = hipSurfaceBoundaryMode(2); +} +#[repr(transparent)] +/// hip surface boundary modes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipSurfaceBoundaryMode(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipCtx_t { + _unused: [u8; 0], +} +pub type hipCtx_t = *mut ihipCtx_t; +pub type hipDevice_t = ::core::ffi::c_int; +impl hipDeviceP2PAttr { + pub const hipDevP2PAttrPerformanceRank: hipDeviceP2PAttr 
= hipDeviceP2PAttr(0); +} +impl hipDeviceP2PAttr { + pub const hipDevP2PAttrAccessSupported: hipDeviceP2PAttr = hipDeviceP2PAttr(1); +} +impl hipDeviceP2PAttr { + pub const hipDevP2PAttrNativeAtomicSupported: hipDeviceP2PAttr = hipDeviceP2PAttr(2); +} +impl hipDeviceP2PAttr { + pub const hipDevP2PAttrHipArrayAccessSupported: hipDeviceP2PAttr = hipDeviceP2PAttr( + 3, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipDeviceP2PAttr(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipStream_t { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipStream_t(pub *mut ihipStream_t); +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipIpcMemHandle_st { + pub reserved: [::core::ffi::c_char; 64usize], +} +pub type hipIpcMemHandle_t = hipIpcMemHandle_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipIpcEventHandle_st { + pub reserved: [::core::ffi::c_char; 64usize], +} +pub type hipIpcEventHandle_t = hipIpcEventHandle_st; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipModule_t { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipModule_t(pub *mut ihipModule_t); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipModuleSymbol_t { + _unused: [u8; 0], +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipFunction_t(pub *mut ihipModuleSymbol_t); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipMemPoolHandle_t { + _unused: [u8; 0], +} +/// HIP memory pool +pub type hipMemPool_t = *mut ihipMemPoolHandle_t; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipFuncAttributes { + pub binaryVersion: ::core::ffi::c_int, + pub cacheModeCA: ::core::ffi::c_int, + pub constSizeBytes: usize, + pub localSizeBytes: usize, + pub maxDynamicSharedSizeBytes: ::core::ffi::c_int, + pub maxThreadsPerBlock: ::core::ffi::c_int, + pub numRegs: ::core::ffi::c_int, + pub preferredShmemCarveout: ::core::ffi::c_int, + pub ptxVersion: ::core::ffi::c_int, + pub sharedSizeBytes: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipEvent_t { + _unused: [u8; 0], +} +pub type hipEvent_t = *mut ihipEvent_t; +impl hipLimit_t { + /**< Limit of stack size in bytes on the current device, per +< thread. The size is in units of 256 dwords, up to the +< limit of (128K - 16)*/ + pub const hipLimitStackSize: hipLimit_t = hipLimit_t(0); +} +impl hipLimit_t { + /**< Size limit in bytes of fifo used by printf call on the +< device. 
Currently not supported*/ + pub const hipLimitPrintfFifoSize: hipLimit_t = hipLimit_t(1); +} +impl hipLimit_t { + /**< Limit of heap size in bytes on the current device, should +< be less than the global memory size on the device*/ + pub const hipLimitMallocHeapSize: hipLimit_t = hipLimit_t(2); +} +impl hipLimit_t { + ///< Supported limit range + pub const hipLimitRange: hipLimit_t = hipLimit_t(3); +} +#[repr(transparent)] +/** hipLimit + + @note In HIP device limit-related APIs, any input limit value other than those defined in the + enum is treated as "UnsupportedLimit" by default.*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipLimit_t(pub ::core::ffi::c_uint); +impl hipMemoryAdvise { + /**< Data will mostly be read and only occassionally +< be written to*/ + pub const hipMemAdviseSetReadMostly: hipMemoryAdvise = hipMemoryAdvise(1); +} +impl hipMemoryAdvise { + ///< Undo the effect of hipMemAdviseSetReadMostly + pub const hipMemAdviseUnsetReadMostly: hipMemoryAdvise = hipMemoryAdvise(2); +} +impl hipMemoryAdvise { + /**< Set the preferred location for the data as +< the specified device*/ + pub const hipMemAdviseSetPreferredLocation: hipMemoryAdvise = hipMemoryAdvise(3); +} +impl hipMemoryAdvise { + ///< Clear the preferred location for the data + pub const hipMemAdviseUnsetPreferredLocation: hipMemoryAdvise = hipMemoryAdvise(4); +} +impl hipMemoryAdvise { + /**< Data will be accessed by the specified device +< so prevent page faults as much as possible*/ + pub const hipMemAdviseSetAccessedBy: hipMemoryAdvise = hipMemoryAdvise(5); +} +impl hipMemoryAdvise { + /**< Let HIP to decide on the page faulting policy +< for the specified device*/ + pub const hipMemAdviseUnsetAccessedBy: hipMemoryAdvise = hipMemoryAdvise(6); +} +impl hipMemoryAdvise { + /**< The default memory model is fine-grain. That allows +< coherent operations between host and device, while +< executing kernels. 
The coarse-grain can be used +< for data that only needs to be coherent at dispatch +< boundaries for better performance*/ + pub const hipMemAdviseSetCoarseGrain: hipMemoryAdvise = hipMemoryAdvise(100); +} +impl hipMemoryAdvise { + ///< Restores cache coherency policy back to fine-grain + pub const hipMemAdviseUnsetCoarseGrain: hipMemoryAdvise = hipMemoryAdvise(101); +} +#[repr(transparent)] +/** HIP Memory Advise values + + @note This memory advise enumeration is used on Linux, not Windows.*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemoryAdvise(pub ::core::ffi::c_uint); +impl hipMemRangeCoherencyMode { + /**< Updates to memory with this attribute can be +< done coherently from all devices*/ + pub const hipMemRangeCoherencyModeFineGrain: hipMemRangeCoherencyMode = hipMemRangeCoherencyMode( + 0, + ); +} +impl hipMemRangeCoherencyMode { + /**< Writes to memory with this attribute can be +< performed by a single device at a time*/ + pub const hipMemRangeCoherencyModeCoarseGrain: hipMemRangeCoherencyMode = hipMemRangeCoherencyMode( + 1, + ); +} +impl hipMemRangeCoherencyMode { + /**< Memory region queried contains subregions with +< both hipMemRangeCoherencyModeFineGrain and +< hipMemRangeCoherencyModeCoarseGrain attributes*/ + pub const hipMemRangeCoherencyModeIndeterminate: hipMemRangeCoherencyMode = hipMemRangeCoherencyMode( + 2, + ); +} +#[repr(transparent)] +/// HIP Coherency Mode +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemRangeCoherencyMode(pub ::core::ffi::c_uint); +impl hipMemRangeAttribute { + /**< Whether the range will mostly be read and +< only occassionally be written to*/ + pub const hipMemRangeAttributeReadMostly: hipMemRangeAttribute = hipMemRangeAttribute( + 1, + ); +} +impl hipMemRangeAttribute { + ///< The preferred location of the range + pub const hipMemRangeAttributePreferredLocation: hipMemRangeAttribute = hipMemRangeAttribute( + 2, + ); +} +impl hipMemRangeAttribute { + /**< Memory range has hipMemAdviseSetAccessedBy +< set for the specified device*/ + pub const hipMemRangeAttributeAccessedBy: hipMemRangeAttribute = hipMemRangeAttribute( + 3, + ); +} +impl hipMemRangeAttribute { + /**< The last location to where the range was +< prefetched*/ + pub const hipMemRangeAttributeLastPrefetchLocation: hipMemRangeAttribute = hipMemRangeAttribute( + 4, + ); +} +impl hipMemRangeAttribute { + /**< Returns coherency mode +< @ref hipMemRangeCoherencyMode for the range*/ + pub const hipMemRangeAttributeCoherencyMode: hipMemRangeAttribute = hipMemRangeAttribute( + 100, + ); +} +#[repr(transparent)] +/// HIP range attributes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemRangeAttribute(pub ::core::ffi::c_uint); +impl hipMemPoolAttr { + /** (value type = int) + Allow @p hipMemAllocAsync to use memory asynchronously freed + in another streams as long as a stream ordering dependency + of the allocating stream on the free action exists. + hip events and null stream interactions can create the required + stream ordered dependencies. (default enabled)*/ + pub const hipMemPoolReuseFollowEventDependencies: hipMemPoolAttr = hipMemPoolAttr(1); +} +impl hipMemPoolAttr { + /** (value type = int) + Allow reuse of already completed frees when there is no dependency + between the free and allocation. 
(default enabled)*/ + pub const hipMemPoolReuseAllowOpportunistic: hipMemPoolAttr = hipMemPoolAttr(2); +} +impl hipMemPoolAttr { + /** (value type = int) + Allow @p hipMemAllocAsync to insert new stream dependencies + in order to establish the stream ordering required to reuse + a piece of memory released by cuFreeAsync (default enabled).*/ + pub const hipMemPoolReuseAllowInternalDependencies: hipMemPoolAttr = hipMemPoolAttr( + 3, + ); +} +impl hipMemPoolAttr { + /** (value type = uint64_t) + Amount of reserved memory in bytes to hold onto before trying + to release memory back to the OS. When more than the release + threshold bytes of memory are held by the memory pool, the + allocator will try to release memory back to the OS on the + next call to stream, event or context synchronize. (default 0)*/ + pub const hipMemPoolAttrReleaseThreshold: hipMemPoolAttr = hipMemPoolAttr(4); +} +impl hipMemPoolAttr { + /** (value type = uint64_t) + Amount of backing memory currently allocated for the mempool.*/ + pub const hipMemPoolAttrReservedMemCurrent: hipMemPoolAttr = hipMemPoolAttr(5); +} +impl hipMemPoolAttr { + /** (value type = uint64_t) + High watermark of backing memory allocated for the mempool since the + last time it was reset. High watermark can only be reset to zero.*/ + pub const hipMemPoolAttrReservedMemHigh: hipMemPoolAttr = hipMemPoolAttr(6); +} +impl hipMemPoolAttr { + /** (value type = uint64_t) + Amount of memory from the pool that is currently in use by the application.*/ + pub const hipMemPoolAttrUsedMemCurrent: hipMemPoolAttr = hipMemPoolAttr(7); +} +impl hipMemPoolAttr { + /** (value type = uint64_t) + High watermark of the amount of memory from the pool that was in use by the application since + the last time it was reset. High watermark can only be reset to zero.*/ + pub const hipMemPoolAttrUsedMemHigh: hipMemPoolAttr = hipMemPoolAttr(8); +} +#[repr(transparent)] +/// HIP memory pool attributes +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemPoolAttr(pub ::core::ffi::c_uint); +impl hipMemLocationType { + pub const hipMemLocationTypeInvalid: hipMemLocationType = hipMemLocationType(0); +} +impl hipMemLocationType { + ///< Device location, thus it's HIP device ID + pub const hipMemLocationTypeDevice: hipMemLocationType = hipMemLocationType(1); +} +#[repr(transparent)] +/// Specifies the type of location +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemLocationType(pub ::core::ffi::c_uint); +/** Specifies a memory location. 
+ + To specify a gpu, set type = @p hipMemLocationTypeDevice and set id = the gpu's device ID*/ +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemLocation { + ///< Specifies the location type, which describes the meaning of id + pub type_: hipMemLocationType, + ///< Identifier for the provided location type @p hipMemLocationType + pub id: ::core::ffi::c_int, +} +impl hipMemAccessFlags { + ///< Default, make the address range not accessible + pub const hipMemAccessFlagsProtNone: hipMemAccessFlags = hipMemAccessFlags(0); +} +impl hipMemAccessFlags { + ///< Set the address range read accessible + pub const hipMemAccessFlagsProtRead: hipMemAccessFlags = hipMemAccessFlags(1); +} +impl hipMemAccessFlags { + ///< Set the address range read-write accessible + pub const hipMemAccessFlagsProtReadWrite: hipMemAccessFlags = hipMemAccessFlags(3); +} +#[repr(transparent)] +/** Specifies the memory protection flags for mapping +*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAccessFlags(pub ::core::ffi::c_uint); +/// Memory access descriptor +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAccessDesc { + ///< Location on which the accessibility has to change + pub location: hipMemLocation, + ///< Accessibility flags to set + pub flags: hipMemAccessFlags, +} +impl hipMemAllocationType { + pub const hipMemAllocationTypeInvalid: hipMemAllocationType = hipMemAllocationType( + 0, + ); +} +impl hipMemAllocationType { + /** This allocation type is 'pinned', i.e. cannot migrate from its current + location while the application is actively using it*/ + pub const hipMemAllocationTypePinned: hipMemAllocationType = hipMemAllocationType(1); +} +impl hipMemAllocationType { + /** This allocation type is 'pinned', i.e. cannot migrate from its current + location while the application is actively using it*/ + pub const hipMemAllocationTypeMax: hipMemAllocationType = hipMemAllocationType( + 2147483647, + ); +} +#[repr(transparent)] +/// Defines the allocation types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAllocationType(pub ::core::ffi::c_uint); +impl hipMemAllocationHandleType { + ///< Does not allow any export mechanism + pub const hipMemHandleTypeNone: hipMemAllocationHandleType = hipMemAllocationHandleType( + 0, + ); +} +impl hipMemAllocationHandleType { + ///< Allows a file descriptor for exporting. Permitted only on POSIX systems + pub const hipMemHandleTypePosixFileDescriptor: hipMemAllocationHandleType = hipMemAllocationHandleType( + 1, + ); +} +impl hipMemAllocationHandleType { + ///< Allows a Win32 NT handle for exporting. (HANDLE) + pub const hipMemHandleTypeWin32: hipMemAllocationHandleType = hipMemAllocationHandleType( + 2, + ); +} +impl hipMemAllocationHandleType { + ///< Allows a Win32 KMT handle for exporting. (D3DKMT_HANDLE) + pub const hipMemHandleTypeWin32Kmt: hipMemAllocationHandleType = hipMemAllocationHandleType( + 4, + ); +} +#[repr(transparent)] +/** Flags for specifying handle types for memory pool allocations +*/ +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAllocationHandleType(pub ::core::ffi::c_uint); +/// Specifies the properties of allocations made from the pool. +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemPoolProps { + ///< Allocation type. 
Currently must be specified as @p hipMemAllocationTypePinned + pub allocType: hipMemAllocationType, + ///< Handle types that will be supported by allocations from the pool + pub handleTypes: hipMemAllocationHandleType, + ///< Location where allocations should reside + pub location: hipMemLocation, + /// Windows-specific LPSECURITYATTRIBUTES required when @p hipMemHandleTypeWin32 is specified + pub win32SecurityAttributes: *mut ::core::ffi::c_void, + ///< Maximum pool size. When set to 0, defaults to a system dependent value + pub maxSize: usize, + ///< Reserved for future use, must be 0 + pub reserved: [::core::ffi::c_uchar; 56usize], +} +/// Opaque data structure for exporting a pool allocation +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemPoolPtrExportData { + pub reserved: [::core::ffi::c_uchar; 64usize], +} +impl hipJitOption { + pub const hipJitOptionMaxRegisters: hipJitOption = hipJitOption(0); +} +impl hipJitOption { + pub const hipJitOptionThreadsPerBlock: hipJitOption = hipJitOption(1); +} +impl hipJitOption { + pub const hipJitOptionWallTime: hipJitOption = hipJitOption(2); +} +impl hipJitOption { + pub const hipJitOptionInfoLogBuffer: hipJitOption = hipJitOption(3); +} +impl hipJitOption { + pub const hipJitOptionInfoLogBufferSizeBytes: hipJitOption = hipJitOption(4); +} +impl hipJitOption { + pub const hipJitOptionErrorLogBuffer: hipJitOption = hipJitOption(5); +} +impl hipJitOption { + pub const hipJitOptionErrorLogBufferSizeBytes: hipJitOption = hipJitOption(6); +} +impl hipJitOption { + pub const hipJitOptionOptimizationLevel: hipJitOption = hipJitOption(7); +} +impl hipJitOption { + pub const hipJitOptionTargetFromContext: hipJitOption = hipJitOption(8); +} +impl hipJitOption { + pub const hipJitOptionTarget: hipJitOption = hipJitOption(9); +} +impl hipJitOption { + pub const hipJitOptionFallbackStrategy: hipJitOption = hipJitOption(10); +} +impl hipJitOption { + pub const hipJitOptionGenerateDebugInfo: hipJitOption = hipJitOption(11); +} +impl hipJitOption { + pub const hipJitOptionLogVerbose: hipJitOption = hipJitOption(12); +} +impl hipJitOption { + pub const hipJitOptionGenerateLineInfo: hipJitOption = hipJitOption(13); +} +impl hipJitOption { + pub const hipJitOptionCacheMode: hipJitOption = hipJitOption(14); +} +impl hipJitOption { + pub const hipJitOptionSm3xOpt: hipJitOption = hipJitOption(15); +} +impl hipJitOption { + pub const hipJitOptionFastCompile: hipJitOption = hipJitOption(16); +} +impl hipJitOption { + pub const hipJitOptionNumOptions: hipJitOption = hipJitOption(17); +} +#[repr(transparent)] +/// hipJitOption +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipJitOption(pub ::core::ffi::c_uint); +impl hipFuncAttribute { + pub const hipFuncAttributeMaxDynamicSharedMemorySize: hipFuncAttribute = hipFuncAttribute( + 8, + ); +} +impl hipFuncAttribute { + pub const hipFuncAttributePreferredSharedMemoryCarveout: hipFuncAttribute = hipFuncAttribute( + 9, + ); +} +impl hipFuncAttribute { + pub const hipFuncAttributeMax: hipFuncAttribute = hipFuncAttribute(10); +} +#[repr(transparent)] +/// @warning On AMD devices and some Nvidia devices, these hints and controls are ignored. 
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipFuncAttribute(pub ::core::ffi::c_uint); +impl hipFuncCache_t { + ///< no preference for shared memory or L1 (default) + pub const hipFuncCachePreferNone: hipFuncCache_t = hipFuncCache_t(0); +} +impl hipFuncCache_t { + ///< prefer larger shared memory and smaller L1 cache + pub const hipFuncCachePreferShared: hipFuncCache_t = hipFuncCache_t(1); +} +impl hipFuncCache_t { + ///< prefer larger L1 cache and smaller shared memory + pub const hipFuncCachePreferL1: hipFuncCache_t = hipFuncCache_t(2); +} +impl hipFuncCache_t { + ///< prefer equal size L1 cache and shared memory + pub const hipFuncCachePreferEqual: hipFuncCache_t = hipFuncCache_t(3); +} +#[repr(transparent)] +/// @warning On AMD devices and some Nvidia devices, these hints and controls are ignored. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipFuncCache_t(pub ::core::ffi::c_uint); +impl hipSharedMemConfig { + ///< The compiler selects a device-specific value for the banking. + pub const hipSharedMemBankSizeDefault: hipSharedMemConfig = hipSharedMemConfig(0); +} +impl hipSharedMemConfig { + /**< Shared mem is banked at 4-bytes intervals and performs best +< when adjacent threads access data 4 bytes apart.*/ + pub const hipSharedMemBankSizeFourByte: hipSharedMemConfig = hipSharedMemConfig(1); +} +impl hipSharedMemConfig { + /**< Shared mem is banked at 8-byte intervals and performs best +< when adjacent threads access data 4 bytes apart.*/ + pub const hipSharedMemBankSizeEightByte: hipSharedMemConfig = hipSharedMemConfig(2); +} +#[repr(transparent)] +/// @warning On AMD devices and some Nvidia devices, these hints and controls are ignored. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipSharedMemConfig(pub ::core::ffi::c_uint); +/// Struct for data in 3D +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct dim3 { + ///< x + pub x: u32, + ///< y + pub y: u32, + ///< z + pub z: u32, +} +/// struct hipLaunchParams_t +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipLaunchParams_t { + ///< Device function symbol + pub func: *mut ::core::ffi::c_void, + ///< Grid dimentions + pub gridDim: dim3, + ///< Block dimentions + pub blockDim: dim3, + ///< Arguments + pub args: *mut *mut ::core::ffi::c_void, + ///< Shared memory + pub sharedMem: usize, + ///< Stream identifier + pub stream: hipStream_t, +} +/// struct hipLaunchParams_t +pub type hipLaunchParams = hipLaunchParams_t; +/// struct hipFunctionLaunchParams_t +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipFunctionLaunchParams_t { + ///< Kernel to launch + pub function: hipFunction_t, + ///< Width(X) of grid in blocks + pub gridDimX: ::core::ffi::c_uint, + ///< Height(Y) of grid in blocks + pub gridDimY: ::core::ffi::c_uint, + ///< Depth(Z) of grid in blocks + pub gridDimZ: ::core::ffi::c_uint, + ///< X dimension of each thread block + pub blockDimX: ::core::ffi::c_uint, + ///< Y dimension of each thread block + pub blockDimY: ::core::ffi::c_uint, + ///< Z dimension of each thread block + pub blockDimZ: ::core::ffi::c_uint, + ///< Shared memory + pub sharedMemBytes: ::core::ffi::c_uint, + ///< Stream identifier + pub hStream: hipStream_t, + ///< Kernel parameters + pub kernelParams: *mut *mut ::core::ffi::c_void, +} +/// struct hipFunctionLaunchParams_t +pub type hipFunctionLaunchParams = hipFunctionLaunchParams_t; +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeOpaqueFd: 
hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 1, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeOpaqueWin32: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 2, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeOpaqueWin32Kmt: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 3, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeD3D12Heap: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 4, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeD3D12Resource: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 5, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeD3D11Resource: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 6, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeD3D11ResourceKmt: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 7, + ); +} +impl hipExternalMemoryHandleType_enum { + pub const hipExternalMemoryHandleTypeNvSciBuf: hipExternalMemoryHandleType_enum = hipExternalMemoryHandleType_enum( + 8, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalMemoryHandleType_enum(pub ::core::ffi::c_uint); +pub use self::hipExternalMemoryHandleType_enum as hipExternalMemoryHandleType; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipExternalMemoryHandleDesc_st { + pub type_: hipExternalMemoryHandleType, + pub handle: hipExternalMemoryHandleDesc_st__bindgen_ty_1, + pub size: ::core::ffi::c_ulonglong, + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipExternalMemoryHandleDesc_st__bindgen_ty_1 { + pub fd: ::core::ffi::c_int, + pub win32: hipExternalMemoryHandleDesc_st__bindgen_ty_1__bindgen_ty_1, + pub nvSciBufObject: *const ::core::ffi::c_void, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalMemoryHandleDesc_st__bindgen_ty_1__bindgen_ty_1 { + pub handle: *mut ::core::ffi::c_void, + pub name: *const ::core::ffi::c_void, +} +pub type hipExternalMemoryHandleDesc = hipExternalMemoryHandleDesc_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalMemoryBufferDesc_st { + pub offset: ::core::ffi::c_ulonglong, + pub size: ::core::ffi::c_ulonglong, + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +pub type hipExternalMemoryBufferDesc = hipExternalMemoryBufferDesc_st; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalMemoryMipmappedArrayDesc_st { + pub offset: ::core::ffi::c_ulonglong, + pub formatDesc: hipChannelFormatDesc, + pub extent: hipExtent, + pub flags: ::core::ffi::c_uint, + pub numLevels: ::core::ffi::c_uint, +} +pub type hipExternalMemoryMipmappedArrayDesc = hipExternalMemoryMipmappedArrayDesc_st; +pub type hipExternalMemory_t = *mut ::core::ffi::c_void; +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeOpaqueFd: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 1, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeOpaqueWin32: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 2, + ); +} +impl 
hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeOpaqueWin32Kmt: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 3, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeD3D12Fence: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 4, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeD3D11Fence: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 5, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeNvSciSync: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 6, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeKeyedMutex: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 7, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeKeyedMutexKmt: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 8, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeTimelineSemaphoreFd: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 9, + ); +} +impl hipExternalSemaphoreHandleType_enum { + pub const hipExternalSemaphoreHandleTypeTimelineSemaphoreWin32: hipExternalSemaphoreHandleType_enum = hipExternalSemaphoreHandleType_enum( + 10, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreHandleType_enum(pub ::core::ffi::c_uint); +pub use self::hipExternalSemaphoreHandleType_enum as hipExternalSemaphoreHandleType; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipExternalSemaphoreHandleDesc_st { + pub type_: hipExternalSemaphoreHandleType, + pub handle: hipExternalSemaphoreHandleDesc_st__bindgen_ty_1, + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipExternalSemaphoreHandleDesc_st__bindgen_ty_1 { + pub fd: ::core::ffi::c_int, + pub win32: hipExternalSemaphoreHandleDesc_st__bindgen_ty_1__bindgen_ty_1, + pub NvSciSyncObj: *const ::core::ffi::c_void, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreHandleDesc_st__bindgen_ty_1__bindgen_ty_1 { + pub handle: *mut ::core::ffi::c_void, + pub name: *const ::core::ffi::c_void, +} +pub type hipExternalSemaphoreHandleDesc = hipExternalSemaphoreHandleDesc_st; +pub type hipExternalSemaphore_t = *mut ::core::ffi::c_void; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipExternalSemaphoreSignalParams_st { + pub params: hipExternalSemaphoreSignalParams_st__bindgen_ty_1, + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipExternalSemaphoreSignalParams_st__bindgen_ty_1 { + pub fence: hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_1, + pub nvSciSync: hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_2, + pub keyedMutex: hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_3, + pub reserved: [::core::ffi::c_uint; 12usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_1 { + pub value: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union 
hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_2 { + pub fence: *mut ::core::ffi::c_void, + pub reserved: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreSignalParams_st__bindgen_ty_1__bindgen_ty_3 { + pub key: ::core::ffi::c_ulonglong, +} +pub type hipExternalSemaphoreSignalParams = hipExternalSemaphoreSignalParams_st; +/// External semaphore wait parameters, compatible with driver type +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipExternalSemaphoreWaitParams_st { + pub params: hipExternalSemaphoreWaitParams_st__bindgen_ty_1, + pub flags: ::core::ffi::c_uint, + pub reserved: [::core::ffi::c_uint; 16usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipExternalSemaphoreWaitParams_st__bindgen_ty_1 { + pub fence: hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_1, + pub nvSciSync: hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_2, + pub keyedMutex: hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_3, + pub reserved: [::core::ffi::c_uint; 10usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_1 { + pub value: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_2 { + pub fence: *mut ::core::ffi::c_void, + pub reserved: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreWaitParams_st__bindgen_ty_1__bindgen_ty_3 { + pub key: ::core::ffi::c_ulonglong, + pub timeoutMs: ::core::ffi::c_uint, +} +/// External semaphore wait parameters, compatible with driver type +pub type hipExternalSemaphoreWaitParams = hipExternalSemaphoreWaitParams_st; +impl hipGraphicsRegisterFlags { + pub const hipGraphicsRegisterFlagsNone: hipGraphicsRegisterFlags = hipGraphicsRegisterFlags( + 0, + ); +} +impl hipGraphicsRegisterFlags { + ///< HIP will not write to this registered resource + pub const hipGraphicsRegisterFlagsReadOnly: hipGraphicsRegisterFlags = hipGraphicsRegisterFlags( + 1, + ); +} +impl hipGraphicsRegisterFlags { + pub const hipGraphicsRegisterFlagsWriteDiscard: hipGraphicsRegisterFlags = hipGraphicsRegisterFlags( + 2, + ); +} +impl hipGraphicsRegisterFlags { + ///< HIP will bind this resource to a surface + pub const hipGraphicsRegisterFlagsSurfaceLoadStore: hipGraphicsRegisterFlags = hipGraphicsRegisterFlags( + 4, + ); +} +impl hipGraphicsRegisterFlags { + pub const hipGraphicsRegisterFlagsTextureGather: hipGraphicsRegisterFlags = hipGraphicsRegisterFlags( + 8, + ); +} +#[repr(transparent)] +/// HIP Access falgs for Interop resources. 
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphicsRegisterFlags(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _hipGraphicsResource { + _unused: [u8; 0], +} +pub type hipGraphicsResource = _hipGraphicsResource; +pub type hipGraphicsResource_t = *mut hipGraphicsResource; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipGraph { + _unused: [u8; 0], +} +/// An opaque value that represents a hip graph +pub type hipGraph_t = *mut ihipGraph; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct hipGraphNode { + _unused: [u8; 0], +} +/// An opaque value that represents a hip graph node +pub type hipGraphNode_t = *mut hipGraphNode; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct hipGraphExec { + _unused: [u8; 0], +} +/// An opaque value that represents a hip graph Exec +pub type hipGraphExec_t = *mut hipGraphExec; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct hipUserObject { + _unused: [u8; 0], +} +/// An opaque value that represents a user obj +pub type hipUserObject_t = *mut hipUserObject; +impl hipGraphNodeType { + ///< GPU kernel node + pub const hipGraphNodeTypeKernel: hipGraphNodeType = hipGraphNodeType(0); +} +impl hipGraphNodeType { + ///< Memcpy node + pub const hipGraphNodeTypeMemcpy: hipGraphNodeType = hipGraphNodeType(1); +} +impl hipGraphNodeType { + ///< Memset node + pub const hipGraphNodeTypeMemset: hipGraphNodeType = hipGraphNodeType(2); +} +impl hipGraphNodeType { + ///< Host (executable) node + pub const hipGraphNodeTypeHost: hipGraphNodeType = hipGraphNodeType(3); +} +impl hipGraphNodeType { + ///< Node which executes an embedded graph + pub const hipGraphNodeTypeGraph: hipGraphNodeType = hipGraphNodeType(4); +} +impl hipGraphNodeType { + ///< Empty (no-op) node + pub const hipGraphNodeTypeEmpty: hipGraphNodeType = hipGraphNodeType(5); +} +impl hipGraphNodeType { + ///< External event wait node + pub const hipGraphNodeTypeWaitEvent: hipGraphNodeType = hipGraphNodeType(6); +} +impl hipGraphNodeType { + ///< External event record node + pub const hipGraphNodeTypeEventRecord: hipGraphNodeType = hipGraphNodeType(7); +} +impl hipGraphNodeType { + ///< External Semaphore signal node + pub const hipGraphNodeTypeExtSemaphoreSignal: hipGraphNodeType = hipGraphNodeType(8); +} +impl hipGraphNodeType { + ///< External Semaphore wait node + pub const hipGraphNodeTypeExtSemaphoreWait: hipGraphNodeType = hipGraphNodeType(9); +} +impl hipGraphNodeType { + ///< Memory alloc node + pub const hipGraphNodeTypeMemAlloc: hipGraphNodeType = hipGraphNodeType(10); +} +impl hipGraphNodeType { + ///< Memory free node + pub const hipGraphNodeTypeMemFree: hipGraphNodeType = hipGraphNodeType(11); +} +impl hipGraphNodeType { + ///< MemcpyFromSymbol node + pub const hipGraphNodeTypeMemcpyFromSymbol: hipGraphNodeType = hipGraphNodeType(12); +} +impl hipGraphNodeType { + ///< MemcpyToSymbol node + pub const hipGraphNodeTypeMemcpyToSymbol: hipGraphNodeType = hipGraphNodeType(13); +} +impl hipGraphNodeType { + pub const hipGraphNodeTypeCount: hipGraphNodeType = hipGraphNodeType(14); +} +#[repr(transparent)] +/// hipGraphNodeType +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphNodeType(pub ::core::ffi::c_uint); +pub type hipHostFn_t = ::core::option::Option< + unsafe extern "C" fn(userData: *mut ::core::ffi::c_void), +>; +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipHostNodeParams { + pub fn_: hipHostFn_t, + pub userData: *mut ::core::ffi::c_void, +} +#[repr(C)] 
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipKernelNodeParams { + pub blockDim: dim3, + pub extra: *mut *mut ::core::ffi::c_void, + pub func: *mut ::core::ffi::c_void, + pub gridDim: dim3, + pub kernelParams: *mut *mut ::core::ffi::c_void, + pub sharedMemBytes: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemsetParams { + pub dst: *mut ::core::ffi::c_void, + pub elementSize: ::core::ffi::c_uint, + pub height: usize, + pub pitch: usize, + pub value: ::core::ffi::c_uint, + pub width: usize, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAllocNodeParams { + /**< Pool properties, which contain where +< the location should reside*/ + pub poolProps: hipMemPoolProps, + /**< The number of memory access descriptors. +< Must not be bigger than the number of GPUs*/ + pub accessDescs: *const hipMemAccessDesc, + ///< The number of access descriptors + pub accessDescCount: usize, + ///< The size of the requested allocation in bytes + pub bytesize: usize, + ///< Returned device address of the allocation + pub dptr: *mut ::core::ffi::c_void, +} +impl hipAccessProperty { + pub const hipAccessPropertyNormal: hipAccessProperty = hipAccessProperty(0); +} +impl hipAccessProperty { + pub const hipAccessPropertyStreaming: hipAccessProperty = hipAccessProperty(1); +} +impl hipAccessProperty { + pub const hipAccessPropertyPersisting: hipAccessProperty = hipAccessProperty(2); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipAccessProperty(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct hipAccessPolicyWindow { + pub base_ptr: *mut ::core::ffi::c_void, + pub hitProp: hipAccessProperty, + pub hitRatio: f32, + pub missProp: hipAccessProperty, + pub num_bytes: usize, +} +impl hipLaunchAttributeID { + ///< Valid for Streams, graph nodes, launches + pub const hipLaunchAttributeAccessPolicyWindow: hipLaunchAttributeID = hipLaunchAttributeID( + 1, + ); +} +impl hipLaunchAttributeID { + ///< Valid for graph nodes, launches + pub const hipLaunchAttributeCooperative: hipLaunchAttributeID = hipLaunchAttributeID( + 2, + ); +} +impl hipLaunchAttributeID { + ///< Valid for graph node, streams, launches + pub const hipLaunchAttributePriority: hipLaunchAttributeID = hipLaunchAttributeID(8); +} +#[repr(transparent)] +/// Launch Attribute ID +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipLaunchAttributeID(pub ::core::ffi::c_uint); +/// Launch Attribute Value +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipLaunchAttributeValue { + /**< Value of launch attribute:: +hipLaunchAttributePolicyWindow.*/ + pub accessPolicyWindow: hipAccessPolicyWindow, + ///< Value of launch attribute ::hipLaunchAttributeCooperative + pub cooperative: ::core::ffi::c_int, + /**< Value of launch attribute :: hipLaunchAttributePriority. Execution +priority of kernel.*/ + pub priority: ::core::ffi::c_int, +} +/// Memset node params +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct HIP_MEMSET_NODE_PARAMS { + ///< Destination pointer on device + pub dst: hipDeviceptr_t, + ///< Destination device pointer pitch. Unused if height equals 1 + pub pitch: usize, + ///< Value of memset to be set + pub value: ::core::ffi::c_uint, + ///< Element in bytes. Must be 1, 2, or 4. 
+ pub elementSize: ::core::ffi::c_uint, + ///< Width of a row + pub width: usize, + ///< Number of rows + pub height: usize, +} +impl hipGraphExecUpdateResult { + ///< The update succeeded + pub const hipGraphExecUpdateSuccess: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 0, + ); +} +impl hipGraphExecUpdateResult { + /**< The update failed for an unexpected reason which is described +< in the return value of the function*/ + pub const hipGraphExecUpdateError: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 1, + ); +} +impl hipGraphExecUpdateResult { + ///< The update failed because the topology changed + pub const hipGraphExecUpdateErrorTopologyChanged: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 2, + ); +} +impl hipGraphExecUpdateResult { + ///< The update failed because a node type changed + pub const hipGraphExecUpdateErrorNodeTypeChanged: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 3, + ); +} +impl hipGraphExecUpdateResult { + pub const hipGraphExecUpdateErrorFunctionChanged: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 4, + ); +} +impl hipGraphExecUpdateResult { + pub const hipGraphExecUpdateErrorParametersChanged: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 5, + ); +} +impl hipGraphExecUpdateResult { + pub const hipGraphExecUpdateErrorNotSupported: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 6, + ); +} +impl hipGraphExecUpdateResult { + pub const hipGraphExecUpdateErrorUnsupportedFunctionChange: hipGraphExecUpdateResult = hipGraphExecUpdateResult( + 7, + ); +} +#[repr(transparent)] +/// Graph execution update result +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphExecUpdateResult(pub ::core::ffi::c_uint); +impl hipStreamCaptureMode { + pub const hipStreamCaptureModeGlobal: hipStreamCaptureMode = hipStreamCaptureMode(0); +} +impl hipStreamCaptureMode { + pub const hipStreamCaptureModeThreadLocal: hipStreamCaptureMode = hipStreamCaptureMode( + 1, + ); +} +impl hipStreamCaptureMode { + pub const hipStreamCaptureModeRelaxed: hipStreamCaptureMode = hipStreamCaptureMode( + 2, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipStreamCaptureMode(pub ::core::ffi::c_uint); +impl hipStreamCaptureStatus { + ///< Stream is not capturing + pub const hipStreamCaptureStatusNone: hipStreamCaptureStatus = hipStreamCaptureStatus( + 0, + ); +} +impl hipStreamCaptureStatus { + ///< Stream is actively capturing + pub const hipStreamCaptureStatusActive: hipStreamCaptureStatus = hipStreamCaptureStatus( + 1, + ); +} +impl hipStreamCaptureStatus { + /**< Stream is part of a capture sequence that has been +< invalidated, but not terminated*/ + pub const hipStreamCaptureStatusInvalidated: hipStreamCaptureStatus = hipStreamCaptureStatus( + 2, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipStreamCaptureStatus(pub ::core::ffi::c_uint); +impl hipStreamUpdateCaptureDependenciesFlags { + ///< Add new nodes to the dependency set + pub const hipStreamAddCaptureDependencies: hipStreamUpdateCaptureDependenciesFlags = hipStreamUpdateCaptureDependenciesFlags( + 0, + ); +} +impl hipStreamUpdateCaptureDependenciesFlags { + ///< Replace the dependency set with the new nodes + pub const hipStreamSetCaptureDependencies: hipStreamUpdateCaptureDependenciesFlags = hipStreamUpdateCaptureDependenciesFlags( + 1, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct 
hipStreamUpdateCaptureDependenciesFlags(pub ::core::ffi::c_uint); +impl hipGraphMemAttributeType { + ///< Amount of memory, in bytes, currently associated with graphs + pub const hipGraphMemAttrUsedMemCurrent: hipGraphMemAttributeType = hipGraphMemAttributeType( + 0, + ); +} +impl hipGraphMemAttributeType { + ///< High watermark of memory, in bytes, associated with graphs since the last time. + pub const hipGraphMemAttrUsedMemHigh: hipGraphMemAttributeType = hipGraphMemAttributeType( + 1, + ); +} +impl hipGraphMemAttributeType { + ///< Amount of memory, in bytes, currently allocated for graphs. + pub const hipGraphMemAttrReservedMemCurrent: hipGraphMemAttributeType = hipGraphMemAttributeType( + 2, + ); +} +impl hipGraphMemAttributeType { + ///< High watermark of memory, in bytes, currently allocated for graphs + pub const hipGraphMemAttrReservedMemHigh: hipGraphMemAttributeType = hipGraphMemAttributeType( + 3, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphMemAttributeType(pub ::core::ffi::c_uint); +impl hipUserObjectFlags { + ///< Destructor execution is not synchronized. + pub const hipUserObjectNoDestructorSync: hipUserObjectFlags = hipUserObjectFlags(1); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipUserObjectFlags(pub ::core::ffi::c_uint); +impl hipUserObjectRetainFlags { + ///< Add new reference or retain. + pub const hipGraphUserObjectMove: hipUserObjectRetainFlags = hipUserObjectRetainFlags( + 1, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipUserObjectRetainFlags(pub ::core::ffi::c_uint); +impl hipGraphInstantiateFlags { + pub const hipGraphInstantiateFlagAutoFreeOnLaunch: hipGraphInstantiateFlags = hipGraphInstantiateFlags( + 1, + ); +} +impl hipGraphInstantiateFlags { + pub const hipGraphInstantiateFlagUpload: hipGraphInstantiateFlags = hipGraphInstantiateFlags( + 2, + ); +} +impl hipGraphInstantiateFlags { + pub const hipGraphInstantiateFlagDeviceLaunch: hipGraphInstantiateFlags = hipGraphInstantiateFlags( + 4, + ); +} +impl hipGraphInstantiateFlags { + pub const hipGraphInstantiateFlagUseNodePriority: hipGraphInstantiateFlags = hipGraphInstantiateFlags( + 8, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphInstantiateFlags(pub ::core::ffi::c_uint); +impl hipGraphDebugDotFlags { + pub const hipGraphDebugDotFlagsVerbose: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 1, + ); +} +impl hipGraphDebugDotFlags { + ///< Adds hipKernelNodeParams to output + pub const hipGraphDebugDotFlagsKernelNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 4, + ); +} +impl hipGraphDebugDotFlags { + ///< Adds hipMemcpy3DParms to output + pub const hipGraphDebugDotFlagsMemcpyNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 8, + ); +} +impl hipGraphDebugDotFlags { + ///< Adds hipMemsetParams to output + pub const hipGraphDebugDotFlagsMemsetNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 16, + ); +} +impl hipGraphDebugDotFlags { + ///< Adds hipHostNodeParams to output + pub const hipGraphDebugDotFlagsHostNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 32, + ); +} +impl hipGraphDebugDotFlags { + pub const hipGraphDebugDotFlagsEventNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 64, + ); +} +impl hipGraphDebugDotFlags { + pub const hipGraphDebugDotFlagsExtSemasSignalNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 128, + ); +} 
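// Editor's note — illustrative sketch only, not part of the generated bindings or of this diff.
// bindgen's newtype-enum style (used throughout this file) makes each flag value a tuple
// struct wrapping a ::core::ffi::c_uint rather than a Rust enum, so combining flags is done
// on the inner field. A hypothetical helper, assuming only the hipGraphDebugDotFlags
// constants defined above in this file:
fn verbose_kernel_dot_flags() -> ::core::ffi::c_uint {
    // Combine two generated flag constants into the raw c_uint mask a HIP call would expect.
    hipGraphDebugDotFlags::hipGraphDebugDotFlagsVerbose.0
        | hipGraphDebugDotFlags::hipGraphDebugDotFlagsKernelNodeParams.0
}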
+impl hipGraphDebugDotFlags { + pub const hipGraphDebugDotFlagsExtSemasWaitNodeParams: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 256, + ); +} +impl hipGraphDebugDotFlags { + pub const hipGraphDebugDotFlagsKernelNodeAttributes: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 512, + ); +} +impl hipGraphDebugDotFlags { + pub const hipGraphDebugDotFlagsHandles: hipGraphDebugDotFlags = hipGraphDebugDotFlags( + 1024, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphDebugDotFlags(pub ::core::ffi::c_uint); +impl hipGraphInstantiateResult { + ///< Instantiation Success + pub const hipGraphInstantiateSuccess: hipGraphInstantiateResult = hipGraphInstantiateResult( + 0, + ); +} +impl hipGraphInstantiateResult { + /**< Instantiation failed for an +unexpected reason which is described in the return value of the function*/ + pub const hipGraphInstantiateError: hipGraphInstantiateResult = hipGraphInstantiateResult( + 1, + ); +} +impl hipGraphInstantiateResult { + /**< Instantiation failed due +to invalid structure, such as cycles*/ + pub const hipGraphInstantiateInvalidStructure: hipGraphInstantiateResult = hipGraphInstantiateResult( + 2, + ); +} +impl hipGraphInstantiateResult { + /**< Instantiation for device launch failed +because the graph contained an unsupported operation*/ + pub const hipGraphInstantiateNodeOperationNotSupported: hipGraphInstantiateResult = hipGraphInstantiateResult( + 3, + ); +} +impl hipGraphInstantiateResult { + /**< Instantiation for device launch failed +due to the nodes belonging to different contexts*/ + pub const hipGraphInstantiateMultipleDevicesNotSupported: hipGraphInstantiateResult = hipGraphInstantiateResult( + 4, + ); +} +#[repr(transparent)] +/// hipGraphInstantiateWithParams results +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphInstantiateResult(pub ::core::ffi::c_uint); +/// Graph Instantiation parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphInstantiateParams { + ///< The node which caused instantiation to fail, if any + pub errNode_out: hipGraphNode_t, + ///< Instantiation flags + pub flags: ::core::ffi::c_ulonglong, + /**< Whether instantiation was successful. 
+If it failed, the reason why*/ + pub result_out: hipGraphInstantiateResult, + ///< Upload stream + pub uploadStream: hipStream_t, +} +/// Memory allocation properties +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAllocationProp { + ///< Memory allocation type + pub type_: hipMemAllocationType, + ///< Requested handle type + pub requestedHandleType: hipMemAllocationHandleType, + ///< Memory location + pub location: hipMemLocation, + ///< Metadata for Win32 handles + pub win32HandleMetaData: *mut ::core::ffi::c_void, + pub allocFlags: hipMemAllocationProp__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAllocationProp__bindgen_ty_1 { + ///< Compression type + pub compressionType: ::core::ffi::c_uchar, + ///< RDMA capable + pub gpuDirectRDMACapable: ::core::ffi::c_uchar, + ///< Usage + pub usage: ::core::ffi::c_ushort, +} +/// External semaphore signal node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreSignalNodeParams { + pub extSemArray: *mut hipExternalSemaphore_t, + pub paramsArray: *const hipExternalSemaphoreSignalParams, + pub numExtSems: ::core::ffi::c_uint, +} +/// External semaphore wait node parameters +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipExternalSemaphoreWaitNodeParams { + pub extSemArray: *mut hipExternalSemaphore_t, + pub paramsArray: *const hipExternalSemaphoreWaitParams, + pub numExtSems: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ihipMemGenericAllocationHandle { + _unused: [u8; 0], +} +/// Generic handle for memory allocation +pub type hipMemGenericAllocationHandle_t = *mut ihipMemGenericAllocationHandle; +impl hipMemAllocationGranularity_flags { + ///< Minimum granularity + pub const hipMemAllocationGranularityMinimum: hipMemAllocationGranularity_flags = hipMemAllocationGranularity_flags( + 0, + ); +} +impl hipMemAllocationGranularity_flags { + ///< Recommended granularity for performance + pub const hipMemAllocationGranularityRecommended: hipMemAllocationGranularity_flags = hipMemAllocationGranularity_flags( + 1, + ); +} +#[repr(transparent)] +/// Flags for granularity +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemAllocationGranularity_flags(pub ::core::ffi::c_uint); +impl hipMemHandleType { + ///< Generic handle type + pub const hipMemHandleTypeGeneric: hipMemHandleType = hipMemHandleType(0); +} +#[repr(transparent)] +/// Memory handle type +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemHandleType(pub ::core::ffi::c_uint); +impl hipMemOperationType { + ///< Map operation + pub const hipMemOperationTypeMap: hipMemOperationType = hipMemOperationType(1); +} +impl hipMemOperationType { + ///< Unmap operation + pub const hipMemOperationTypeUnmap: hipMemOperationType = hipMemOperationType(2); +} +#[repr(transparent)] +/// Memory operation types +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemOperationType(pub ::core::ffi::c_uint); +impl hipArraySparseSubresourceType { + ///< Sparse level + pub const hipArraySparseSubresourceTypeSparseLevel: hipArraySparseSubresourceType = hipArraySparseSubresourceType( + 0, + ); +} +impl hipArraySparseSubresourceType { + ///< Miptail + pub const hipArraySparseSubresourceTypeMiptail: hipArraySparseSubresourceType = hipArraySparseSubresourceType( + 1, + ); +} +#[repr(transparent)] +/// Subresource types for sparse arrays +#[derive(Debug, Copy, Clone, Hash, 
PartialEq, Eq)] +pub struct hipArraySparseSubresourceType(pub ::core::ffi::c_uint); +/// Map info for arrays +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipArrayMapInfo { + ///< Resource type + pub resourceType: hipResourceType, + pub resource: hipArrayMapInfo__bindgen_ty_1, + ///< Sparse subresource type + pub subresourceType: hipArraySparseSubresourceType, + pub subresource: hipArrayMapInfo__bindgen_ty_2, + ///< Memory operation type + pub memOperationType: hipMemOperationType, + ///< Memory handle type + pub memHandleType: hipMemHandleType, + pub memHandle: hipArrayMapInfo__bindgen_ty_3, + ///< Offset within the memory + pub offset: ::core::ffi::c_ulonglong, + ///< Device ordinal bit mask + pub deviceBitMask: ::core::ffi::c_uint, + ///< flags for future use, must be zero now. + pub flags: ::core::ffi::c_uint, + ///< Reserved for future use, must be zero now. + pub reserved: [::core::ffi::c_uint; 2usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipArrayMapInfo__bindgen_ty_1 { + pub mipmap: hipMipmappedArray, + pub array: hipArray_t, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipArrayMapInfo__bindgen_ty_2 { + pub sparseLevel: hipArrayMapInfo__bindgen_ty_2__bindgen_ty_1, + pub miptail: hipArrayMapInfo__bindgen_ty_2__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipArrayMapInfo__bindgen_ty_2__bindgen_ty_1 { + ///< For mipmapped arrays must be a valid mipmap level. For arrays must be zero + pub level: ::core::ffi::c_uint, + ///< For layered arrays must be a valid layer index. Otherwise, must be zero + pub layer: ::core::ffi::c_uint, + ///< X offset in elements + pub offsetX: ::core::ffi::c_uint, + ///< Y offset in elements + pub offsetY: ::core::ffi::c_uint, + ///< Z offset in elements + pub offsetZ: ::core::ffi::c_uint, + ///< Width in elements + pub extentWidth: ::core::ffi::c_uint, + ///< Height in elements + pub extentHeight: ::core::ffi::c_uint, + ///< Depth in elements + pub extentDepth: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipArrayMapInfo__bindgen_ty_2__bindgen_ty_2 { + ///< For layered arrays must be a valid layer index. Otherwise, must be zero + pub layer: ::core::ffi::c_uint, + ///< Offset within mip tail + pub offset: ::core::ffi::c_ulonglong, + ///< Extent in bytes + pub size: ::core::ffi::c_ulonglong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipArrayMapInfo__bindgen_ty_3 { + pub memHandle: hipMemGenericAllocationHandle_t, +} +/// Memcpy node params +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemcpyNodeParams { + ///< Must be zero. + pub flags: ::core::ffi::c_int, + ///< Must be zero. + pub reserved: [::core::ffi::c_int; 3usize], + ///< Params set for the memory copy. 
+ pub copyParams: hipMemcpy3DParms, +} +/// Child graph node params +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipChildGraphNodeParams { + /**< Either the child graph to clone into the node, or +< a handle to the graph possesed by the node used during query*/ + pub graph: hipGraph_t, +} +/// Event record node params +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipEventWaitNodeParams { + ///< Event to wait on + pub event: hipEvent_t, +} +/// Event record node params +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipEventRecordNodeParams { + ///< The event to be recorded when node executes + pub event: hipEvent_t, +} +/// Memory free node params +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipMemFreeNodeParams { + ///< the pointer to be freed + pub dptr: *mut ::core::ffi::c_void, +} +/// Params for different graph nodes +#[repr(C)] +#[derive(Copy, Clone)] +pub struct hipGraphNodeParams { + pub type_: hipGraphNodeType, + pub reserved0: [::core::ffi::c_int; 3usize], + pub __bindgen_anon_1: hipGraphNodeParams__bindgen_ty_1, + pub reserved2: ::core::ffi::c_longlong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union hipGraphNodeParams__bindgen_ty_1 { + pub reserved1: [::core::ffi::c_longlong; 29usize], + pub kernel: hipKernelNodeParams, + pub memcpy: hipMemcpyNodeParams, + pub memset: hipMemsetParams, + pub host: hipHostNodeParams, + pub graph: hipChildGraphNodeParams, + pub eventWait: hipEventWaitNodeParams, + pub eventRecord: hipEventRecordNodeParams, + pub extSemSignal: hipExternalSemaphoreSignalNodeParams, + pub extSemWait: hipExternalSemaphoreWaitNodeParams, + pub alloc: hipMemAllocNodeParams, + pub free: hipMemFreeNodeParams, +} +impl hipGraphDependencyType { + pub const hipGraphDependencyTypeDefault: hipGraphDependencyType = hipGraphDependencyType( + 0, + ); +} +impl hipGraphDependencyType { + pub const hipGraphDependencyTypeProgrammatic: hipGraphDependencyType = hipGraphDependencyType( + 1, + ); +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphDependencyType(pub ::core::ffi::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct hipGraphEdgeData { + /**< This indicates when the dependency is triggered from the upstream node on the +< edge. The meaning is specfic to the node type. A value of 0 in all cases +< means full completion of the upstream node, with memory visibility to the +< downstream node or portion thereof (indicated by to_port). Only kernel nodes +< define non-zero ports. A kernel node can use the following output port types: +< hipGraphKernelNodePortDefault, hipGraphKernelNodePortProgrammatic, or +< hipGraphKernelNodePortLaunchCompletion.*/ + pub from_port: ::core::ffi::c_uchar, + ///< These bytes are unused and must be zeroed + pub reserved: [::core::ffi::c_uchar; 5usize], + ///< Currently no node types define non-zero ports. This field must be set to zero. + pub to_port: ::core::ffi::c_uchar, + ///< This should be populated with a value from hipGraphDependencyType + pub type_: ::core::ffi::c_uchar, +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n @defgroup API HIP API\n @{\n\n Defines the HIP API. 
See the individual sections for more information.\n/\n/**\n @defgroup Driver Initialization and Version\n @{\n This section describes the initializtion and version functions of HIP runtime API.\n\n/\n/**\n @brief Explicitly initializes the HIP runtime.\n\n @param [in] flags Initialization flag, should be zero.\n\n Most HIP APIs implicitly initialize the HIP runtime.\n This API provides control over the timing of the initialization.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] + pub fn hipInit(flags: ::core::ffi::c_uint) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the approximate HIP driver version. + + @param [out] driverVersion driver version + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning The HIP feature set does not correspond to an exact CUDA SDK driver revision. + This function always set *driverVersion to 4 as an approximation though HIP supports + some features which were introduced in later CUDA SDK revisions. + HIP apps code should not rely on the driver revision number here and should + use arch feature flags to test device capabilities or conditional compilation. + + @see hipRuntimeGetVersion*/ + pub fn hipDriverGetVersion(driverVersion: *mut ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the approximate HIP Runtime version. + + @param [out] runtimeVersion HIP runtime version + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning The version definition of HIP runtime is different from CUDA. + On AMD platform, the function returns HIP runtime version, + while on NVIDIA platform, it returns CUDA runtime version. + And there is no mapping/correlation between HIP version and CUDA version. + + @see hipDriverGetVersion*/ + pub fn hipRuntimeGetVersion(runtimeVersion: *mut ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a handle to a compute device + @param [out] device Handle of device + @param [in] ordinal Device ordinal + + @returns #hipSuccess, #hipErrorInvalidDevice*/ + pub fn hipDeviceGet( + device: *mut hipDevice_t, + ordinal: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the compute capability of the device + @param [out] major Major compute capability version number + @param [out] minor Minor compute capability version number + @param [in] device Device ordinal + + @returns #hipSuccess, #hipErrorInvalidDevice*/ + pub fn hipDeviceComputeCapability( + major: *mut ::core::ffi::c_int, + minor: *mut ::core::ffi::c_int, + device: hipDevice_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns an identifer string for the device. + @param [out] name String of the device name + @param [in] len Maximum length of string to store in device name + @param [in] device Device ordinal + + @returns #hipSuccess, #hipErrorInvalidDevice*/ + pub fn hipDeviceGetName( + name: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + device: hipDevice_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns an UUID for the device.[BETA] + @param [out] uuid UUID for the device + @param [in] device device ordinal + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
+ + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue, #hipErrorNotInitialized, + #hipErrorDeinitialized*/ + pub fn hipDeviceGetUuid(uuid: *mut hipUUID, device: hipDevice_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a value for attribute of link between two devices + @param [out] value Pointer of the value for the attrubute + @param [in] attr enum of hipDeviceP2PAttr to query + @param [in] srcDevice The source device of the link + @param [in] dstDevice The destination device of the link + + @returns #hipSuccess, #hipErrorInvalidDevice*/ + pub fn hipDeviceGetP2PAttribute( + value: *mut ::core::ffi::c_int, + attr: hipDeviceP2PAttr, + srcDevice: ::core::ffi::c_int, + dstDevice: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a PCI Bus Id string for the device, overloaded to take int device ID. + @param [out] pciBusId The string of PCI Bus Id format for the device + @param [in] len Maximum length of string + @param [in] device The device ordinal + + @returns #hipSuccess, #hipErrorInvalidDevice*/ + pub fn hipDeviceGetPCIBusId( + pciBusId: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + device: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a handle to a compute device. + @param [out] device The handle of the device + @param [in] pciBusId The string of PCI Bus Id for the device + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue*/ + pub fn hipDeviceGetByPCIBusId( + device: *mut ::core::ffi::c_int, + pciBusId: *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the total amount of memory on the device. + @param [out] bytes The size of memory in bytes, on the device + @param [in] device The ordinal of the device + + @returns #hipSuccess, #hipErrorInvalidDevice*/ + pub fn hipDeviceTotalMem(bytes: *mut usize, device: hipDevice_t) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n @defgroup Device Device Management\n @{\n This section describes the device management functions of HIP runtime API.\n/\n/**\n @brief Waits on all active streams on current device\n\n When this command is invoked, the host thread gets blocked until all the commands associated\n with streams associated with the device. HIP does not support multiple blocking modes (yet!).\n\n @returns #hipSuccess\n\n @see hipSetDevice, hipDeviceReset"] + pub fn hipDeviceSynchronize() -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief The state of current device is discarded and updated to a fresh state. + + Calling this function deletes all streams created, memory allocated, kernels running, events + created. Make sure that no other thread is using the device or streams, memory, kernels, events + associated with the current device. + + @returns #hipSuccess + + @see hipDeviceSynchronize*/ + pub fn hipDeviceReset() -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set default device to be used for subsequent hip API calls from this thread. + + @param[in] deviceId Valid device in range 0...hipGetDeviceCount(). + + Sets @p device as the default device for the calling host thread. Valid device id's are 0... + (hipGetDeviceCount()-1). + + Many HIP APIs implicitly use the "default device" : + + - Any device memory subsequently allocated from this host thread (using hipMalloc) will be + allocated on device. + - Any streams or events created from this host thread will be associated with device. 
+ - Any kernels launched from this host thread (using hipLaunchKernel) will be executed on device + (unless a specific stream is specified, in which case the device associated with that stream will + be used). + + This function may be called from any host thread. Multiple host threads may use the same device. + This function does no synchronization with the previous or new device, and has very little + runtime overhead. Applications can use hipSetDevice to quickly switch the default device before + making a HIP runtime call which uses the default device. + + The default device is stored in thread-local-storage for each thread. + Thread-pool implementations may inherit the default device of the previous thread. A good + practice is to always call hipSetDevice at the start of HIP coding sequency to establish a known + standard device. + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorNoDevice + + @see #hipGetDevice, #hipGetDeviceCount*/ + pub fn hipSetDevice(deviceId: ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set a list of devices that can be used. + + @param[in] device_arr List of devices to try + @param[in] len Number of devices in specified list + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see #hipGetDevice, #hipGetDeviceCount. #hipSetDevice. #hipGetDeviceProperties. #hipSetDeviceFlags. #hipChooseDevice +*/ + pub fn hipSetValidDevices( + device_arr: *mut ::core::ffi::c_int, + len: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return the default device id for the calling host thread. + + @param [out] deviceId *device is written with the default device + + HIP maintains an default device for each thread using thread-local-storage. + This device is used implicitly for HIP runtime APIs called by this thread. + hipGetDevice returns in * @p device the default device for the calling host thread. + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see hipSetDevice, hipGetDevicesizeBytes*/ + pub fn hipGetDevice(deviceId: *mut ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return number of compute-capable devices. + + @param [out] count Returns number of compute-capable devices. + + @returns #hipSuccess, #hipErrorNoDevice + + + Returns in @p *count the number of devices that have ability to run compute commands. If there + are no such devices, then @ref hipGetDeviceCount will return #hipErrorNoDevice. If 1 or more + devices can be found, then hipGetDeviceCount returns #hipSuccess.*/ + pub fn hipGetDeviceCount(count: *mut ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Query for a specific device attribute. 
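A hedged sketch of device enumeration and selection using the externs above (crate path assumed as before):

    use std::ffi::c_int;
    use hip_runtime_sys::*;

    unsafe fn pick_last_device() {
        let mut count: c_int = 0;
        let _ = hipGetDeviceCount(&mut count);
        if count > 0 {
            let _ = hipSetDevice(count - 1);    // make the last device current
            let mut current: c_int = -1;
            let _ = hipGetDevice(&mut current); // read the choice back
            println!("using device {} of {}", current, count);
        }
    }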
+ + @param [out] pi pointer to value to return + @param [in] attr attribute to query + @param [in] deviceId which device to query for information + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue*/ + pub fn hipDeviceGetAttribute( + pi: *mut ::core::ffi::c_int, + attr: hipDeviceAttribute_t, + deviceId: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the default memory pool of the specified device + + @param [out] mem_pool Default memory pool to return + @param [in] device Device index for query the default memory pool + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipDeviceGetDefaultMemPool, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute, + hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDeviceGetDefaultMemPool( + mem_pool: *mut hipMemPool_t, + device: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the current memory pool of a device + + The memory pool must be local to the specified device. + @p hipMallocAsync allocates from the current mempool of the provided stream's device. + By default, a device's current memory pool is its default memory pool. + + @note Use @p hipMallocFromPoolAsync for asynchronous memory allocations from a device + different than the one the stream runs on. + + @param [in] device Device index for the update + @param [in] mem_pool Memory pool for update as the current on the specified device + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDevice, #hipErrorNotSupported + + @see hipDeviceGetDefaultMemPool, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute, + hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDeviceSetMemPool( + device: ::core::ffi::c_int, + mem_pool: hipMemPool_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the current memory pool for the specified device + + Returns the last pool provided to @p hipDeviceSetMemPool for this device + or the device's default memory pool if @p hipDeviceSetMemPool has never been called. + By default the current mempool is the default mempool for a device, + otherwise the returned pool must have been set with @p hipDeviceSetMemPool. + + @param [out] mem_pool Current memory pool on the specified device + @param [in] device Device index to query the current memory pool + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipDeviceGetDefaultMemPool, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute, + hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDeviceGetMemPool( + mem_pool: *mut hipMemPool_t, + device: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns device properties. 
+ + @param [out] prop written with device properties + @param [in] deviceId which device to query for information + + @return #hipSuccess, #hipErrorInvalidDevice + @bug HCC always returns 0 for maxThreadsPerMultiProcessor + @bug HCC always returns 0 for regsPerBlock + @bug HCC always returns 0 for l2CacheSize + + Populates hipGetDeviceProperties with information for the specified device.*/ + pub fn hipGetDevicePropertiesR0600( + prop: *mut hipDeviceProp_tR0600, + deviceId: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set L1/Shared cache partition. + + @param [in] cacheConfig Cache configuration + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorNotSupported + + Note: AMD devices do not support reconfigurable cache. This API is not implemented + on AMD platform. If the function is called, it will return hipErrorNotSupported. +*/ + pub fn hipDeviceSetCacheConfig(cacheConfig: hipFuncCache_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get Cache configuration for a specific Device + + @param [out] cacheConfig Pointer of cache configuration + + @returns #hipSuccess, #hipErrorNotInitialized + Note: AMD devices do not support reconfigurable cache. This hint is ignored + on these architectures. +*/ + pub fn hipDeviceGetCacheConfig(cacheConfig: *mut hipFuncCache_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets resource limits of current device + + The function queries the size of limit value, as required by the input enum value hipLimit_t, + which can be either #hipLimitStackSize, or #hipLimitMallocHeapSize. Any other input as + default, the function will return #hipErrorUnsupportedLimit. + + @param [out] pValue Returns the size of the limit in bytes + @param [in] limit The limit to query + + @returns #hipSuccess, #hipErrorUnsupportedLimit, #hipErrorInvalidValue +*/ + pub fn hipDeviceGetLimit(pValue: *mut usize, limit: hipLimit_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets resource limits of current device. + + As the input enum limit, + #hipLimitStackSize sets the limit value of the stack size on the current GPU device, per thread. + The limit size can get via hipDeviceGetLimit. The size is in units of 256 dwords, up to the limit + (128K - 16). + + #hipLimitMallocHeapSize sets the limit value of the heap used by the malloc()/free() + calls. For limit size, use the #hipDeviceGetLimit API. + + Any other input as default, the funtion will return hipErrorUnsupportedLimit. + + @param [in] limit Enum of hipLimit_t to set + @param [in] value The size of limit value in bytes + + @returns #hipSuccess, #hipErrorUnsupportedLimit, #hipErrorInvalidValue +*/ + pub fn hipDeviceSetLimit(limit: hipLimit_t, value: usize) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns bank width of shared memory for current device + + @param [out] pConfig The pointer of the bank width for shared memory + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized + + Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is + ignored on those architectures. 
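An illustrative sketch of hipDeviceGetLimit; the associated-constant spelling hipLimit_t::hipLimitStackSize is assumed from the newtype-enum style used elsewhere in these bindings and is not shown in this hunk:

    use std::ffi::CStr;
    use hip_runtime_sys::*;

    unsafe fn print_stack_limit() {
        let mut stack_bytes: usize = 0;
        // hipLimit_t::hipLimitStackSize is an assumed constant name
        let status = hipDeviceGetLimit(&mut stack_bytes, hipLimit_t::hipLimitStackSize);
        let name = CStr::from_ptr(hipGetErrorName(status));
        println!("hipDeviceGetLimit -> {:?}, stack limit {} bytes", name, stack_bytes);
    }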
+*/ + pub fn hipDeviceGetSharedMemConfig(pConfig: *mut hipSharedMemConfig) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the flags set for current device + + @param [out] flags Pointer of the flags + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue*/ + pub fn hipGetDeviceFlags(flags: *mut ::core::ffi::c_uint) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief The bank width of shared memory on current device is set + + @param [in] config Configuration for the bank width of shared memory + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized + + Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is + ignored on those architectures. +*/ + pub fn hipDeviceSetSharedMemConfig(config: hipSharedMemConfig) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief The current device behavior is changed according the flags passed. + + @param [in] flags Flag to set on the current device + + The schedule flags impact how HIP waits for the completion of a command running on a device. + hipDeviceScheduleSpin : HIP runtime will actively spin in the thread which submitted the + work until the command completes. This offers the lowest latency, but will consume a CPU core + and may increase power. hipDeviceScheduleYield : The HIP runtime will yield the CPU to + system so that other tasks can use it. This may increase latency to detect the completion but + will consume less power and is friendlier to other tasks in the system. + hipDeviceScheduleBlockingSync : On ROCm platform, this is a synonym for hipDeviceScheduleYield. + hipDeviceScheduleAuto : Use a hueristic to select between Spin and Yield modes. If the + number of HIP contexts is greater than the number of logical processors in the system, use Spin + scheduling. Else use Yield scheduling. + + + hipDeviceMapHost : Allow mapping host memory. On ROCM, this is always allowed and + the flag is ignored. hipDeviceLmemResizeToMax : @warning ROCm silently ignores this flag. + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorSetOnActiveProcess + +*/ + pub fn hipSetDeviceFlags(flags: ::core::ffi::c_uint) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Device which matches hipDeviceProp_t is returned + + @param [out] device Pointer of the device + @param [in] prop Pointer of the properties + + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipChooseDeviceR0600( + device: *mut ::core::ffi::c_int, + prop: *const hipDeviceProp_tR0600, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the link type and hop count between two devices + + @param [in] device1 Ordinal for device1 + @param [in] device2 Ordinal for device2 + @param [out] linktype Returns the link type (See hsa_amd_link_info_type_t) between the two devices + @param [out] hopcount Returns the hop count between the two devices + + Queries and returns the HSA link type and the hop count between the two specified devices. + + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipExtGetLinkTypeAndHopCount( + device1: ::core::ffi::c_int, + device2: ::core::ffi::c_int, + linktype: *mut u32, + hopcount: *mut u32, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets an interprocess memory handle for an existing device memory + allocation + + Takes a pointer to the base of an existing device memory allocation created + with hipMalloc and exports it for use in another process. 
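A small sketch of the device-flag calls above; flag value 0 corresponds to the default scheduling policy (hipDeviceScheduleAuto in the HIP headers):

    use std::ffi::c_uint;
    use hip_runtime_sys::*;

    unsafe fn default_flags_roundtrip() {
        let _ = hipSetDeviceFlags(0); // 0 selects the default scheduling policy
        let mut flags: c_uint = 0;
        let _ = hipGetDeviceFlags(&mut flags);
        println!("current device flags: {:#x}", flags);
    }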
This is a + lightweight operation and may be called multiple times on an allocation + without adverse effects. + + If a region of memory is freed with hipFree and a subsequent call + to hipMalloc returns memory with the same device address, + hipIpcGetMemHandle will return a unique handle for the + new memory. + + @param handle - Pointer to user allocated hipIpcMemHandle to return + the handle in. + @param devPtr - Base pointer to previously allocated device memory + + @returns #hipSuccess, #hipErrorInvalidHandle, #hipErrorOutOfMemory, #hipErrorMapFailed + + @note This IPC memory related feature API on Windows may behave differently from Linux. +*/ + pub fn hipIpcGetMemHandle( + handle: *mut hipIpcMemHandle_t, + devPtr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Opens an interprocess memory handle exported from another process + and returns a device pointer usable in the local process. + + Maps memory exported from another process with hipIpcGetMemHandle into + the current device address space. For contexts on different devices + hipIpcOpenMemHandle can attempt to enable peer access between the + devices as if the user called hipDeviceEnablePeerAccess. This behavior is + controlled by the hipIpcMemLazyEnablePeerAccess flag. + hipDeviceCanAccessPeer can determine if a mapping is possible. + + Contexts that may open hipIpcMemHandles are restricted in the following way. + hipIpcMemHandles from each device in a given process may only be opened + by one context per device per other process. + + Memory returned from hipIpcOpenMemHandle must be freed with + hipIpcCloseMemHandle. + + Calling hipFree on an exported memory region before calling + hipIpcCloseMemHandle in the importing context will result in undefined + behavior. + + @param devPtr - Returned device pointer + @param handle - hipIpcMemHandle to open + @param flags - Flags for this operation. Must be specified as hipIpcMemLazyEnablePeerAccess + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext, + #hipErrorInvalidDevicePointer + + @note During multiple processes, using the same memory handle opened by the current context, + there is no guarantee that the same device poiter will be returned in @p *devPtr. + This is diffrent from CUDA. + @note This IPC memory related feature API on Windows may behave differently from Linux. +*/ + pub fn hipIpcOpenMemHandle( + devPtr: *mut *mut ::core::ffi::c_void, + handle: hipIpcMemHandle_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Close memory mapped with hipIpcOpenMemHandle + + Unmaps memory returnd by hipIpcOpenMemHandle. The original allocation + in the exporting process as well as imported mappings in other processes + will be unaffected. + + Any resources used to enable peer access will be freed if this is the + last mapping using them. + + @param devPtr - Device pointer returned by hipIpcOpenMemHandle + + @returns #hipSuccess, #hipErrorMapFailed, #hipErrorInvalidHandle + + @note This IPC memory related feature API on Windows may behave differently from Linux. +*/ + pub fn hipIpcCloseMemHandle(devPtr: *mut ::core::ffi::c_void) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets an opaque interprocess handle for an event. + + This opaque handle may be copied into other processes and opened with hipIpcOpenEventHandle. + Then hipEventRecord, hipEventSynchronize, hipStreamWaitEvent and hipEventQuery may be used in + either process. 
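A hedged sketch of exporting a device allocation over IPC, pairing hipMalloc (declared further down in this file) with hipIpcGetMemHandle; the handle layout is treated as opaque:

    use std::ffi::c_void;
    use std::mem::MaybeUninit;
    use hip_runtime_sys::*;

    unsafe fn export_allocation() -> hipError_t {
        let mut dev_ptr: *mut c_void = std::ptr::null_mut();
        let _ = hipMalloc(&mut dev_ptr, 1usize << 20);
        let mut handle = MaybeUninit::<hipIpcMemHandle_t>::zeroed();
        // The bytes of `handle` would be passed to the importing process, which
        // calls hipIpcOpenMemHandle / hipIpcCloseMemHandle on its side.
        hipIpcGetMemHandle(handle.as_mut_ptr(), dev_ptr)
    }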
Operations on the imported event after the exported event has been freed with hipEventDestroy + will result in undefined behavior. + + @param[out] handle Pointer to hipIpcEventHandle to return the opaque event handle + @param[in] event Event allocated with hipEventInterprocess and hipEventDisableTiming flags + + @returns #hipSuccess, #hipErrorInvalidConfiguration, #hipErrorInvalidValue + + @note This IPC event related feature API is currently applicable on Linux. +*/ + pub fn hipIpcGetEventHandle( + handle: *mut hipIpcEventHandle_t, + event: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Opens an interprocess event handles. + + Opens an interprocess event handle exported from another process with hipIpcGetEventHandle. The returned + hipEvent_t behaves like a locally created event with the hipEventDisableTiming flag specified. This event + need be freed with hipEventDestroy. Operations on the imported event after the exported event has been freed + with hipEventDestroy will result in undefined behavior. If the function is called within the same process where + handle is returned by hipIpcGetEventHandle, it will return hipErrorInvalidContext. + + @param[out] event Pointer to hipEvent_t to return the event + @param[in] handle The opaque interprocess handle to open + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext + + @note This IPC event related feature API is currently applicable on Linux. +*/ + pub fn hipIpcOpenEventHandle( + event: *mut hipEvent_t, + handle: hipIpcEventHandle_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n\n @defgroup Execution Execution Control\n @{\n This section describes the execution control functions of HIP runtime API.\n\n/\n/**\n @brief Set attribute for a specific function\n\n @param [in] func Pointer of the function\n @param [in] attr Attribute to set\n @param [in] value Value to set\n\n @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue\n\n Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is\n ignored on those architectures.\n"] + pub fn hipFuncSetAttribute( + func: *const ::core::ffi::c_void, + attr: hipFuncAttribute, + value: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set Cache configuration for a specific function + + @param [in] func Pointer of the function. + @param [in] config Configuration to set. + + @returns #hipSuccess, #hipErrorNotInitialized + Note: AMD devices and some Nvidia GPUS do not support reconfigurable cache. This hint is ignored + on those architectures. +*/ + pub fn hipFuncSetCacheConfig( + func: *const ::core::ffi::c_void, + config: hipFuncCache_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set shared memory configuation for a specific function + + @param [in] func Pointer of the function + @param [in] config Configuration + + @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue + + Note: AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is + ignored on those architectures. 
+*/ + pub fn hipFuncSetSharedMemConfig( + func: *const ::core::ffi::c_void, + config: hipSharedMemConfig, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Error Error Handling\n @{\n This section describes the error handling functions of HIP runtime API.\n/\n/**\n @brief Return last error returned by any HIP runtime API call and resets the stored error code to\n #hipSuccess\n\n @returns return code from last HIP called from the active host thread\n\n Returns the last error that has been returned by any of the runtime calls in the same host\n thread, and then resets the saved error to #hipSuccess.\n\n @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t"] + pub fn hipGetLastError() -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return last error returned by any HIP runtime API call and resets the stored error code to + #hipSuccess + + @returns return code from last HIP called from the active host thread + + Returns the last error that has been returned by any of the runtime calls in the same host + thread, and then resets the saved error to #hipSuccess. + + @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t*/ + pub fn hipExtGetLastError() -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return last error returned by any HIP runtime API call. + + @return #hipSuccess + + Returns the last error that has been returned by any of the runtime calls in the same host + thread. Unlike hipGetLastError, this function does not reset the saved error code. + + @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t*/ + pub fn hipPeekAtLastError() -> hipError_t; +} +extern "C" { + /** @brief Return hip error as text string form. + + @param hip_error Error code to convert to name. + @return const char pointer to the NULL-terminated error name + + @see hipGetErrorString, hipGetLastError, hipPeakAtLastError, hipError_t*/ + pub fn hipGetErrorName(hip_error: hipError_t) -> *const ::core::ffi::c_char; +} +extern "C" { + /** @brief Return handy text string message to explain the error which occurred + + @param hipError Error code to convert to string. + @return const char pointer to the NULL-terminated error string + + @see hipGetErrorName, hipGetLastError, hipPeakAtLastError, hipError_t*/ + pub fn hipGetErrorString(hipError: hipError_t) -> *const ::core::ffi::c_char; +} +extern "C" { + #[must_use] + /** @brief Return hip error as text string form. + + @param [in] hipError Error code to convert to string. + @param [out] errorString char pointer to the NULL-terminated error string + @return #hipSuccess, #hipErrorInvalidValue + + @see hipGetErrorName, hipGetLastError, hipPeakAtLastError, hipError_t*/ + pub fn hipDrvGetErrorName( + hipError: hipError_t, + errorString: *mut *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return handy text string message to explain the error which occurred + + @param [in] hipError Error code to convert to string. 
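A short helper built only from the error-handling externs above, turning a status code into readable text:

    use std::ffi::CStr;
    use hip_runtime_sys::*;

    unsafe fn describe(status: hipError_t) -> String {
        let name = CStr::from_ptr(hipGetErrorName(status)).to_string_lossy();
        let text = CStr::from_ptr(hipGetErrorString(status)).to_string_lossy();
        format!("{}: {}", name, text)
    }

    unsafe fn report_last_error() {
        let status = hipGetLastError(); // also resets the thread-local error to hipSuccess
        println!("{}", describe(status));
    }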
+ @param [out] errorString char pointer to the NULL-terminated error string + @return #hipSuccess, #hipErrorInvalidValue + + @see hipGetErrorName, hipGetLastError, hipPeakAtLastError, hipError_t*/ + pub fn hipDrvGetErrorString( + hipError: hipError_t, + errorString: *mut *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create an asynchronous stream. + + @param[in, out] stream Valid pointer to hipStream_t. This function writes the memory with the + newly created stream. + @return #hipSuccess, #hipErrorInvalidValue + + Create a new asynchronous stream. @p stream returns an opaque handle that can be used to + reference the newly created stream in subsequent hipStream* commands. The stream is allocated on + the heap and will remain allocated even if the handle goes out-of-scope. To release the memory + used by the stream, application must call hipStreamDestroy. + + @return #hipSuccess, #hipErrorInvalidValue + + @see hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy*/ + pub fn hipStreamCreate(stream: *mut hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create an asynchronous stream. + + @param[in, out] stream Pointer to new stream + @param[in ] flags to control stream creation. + @return #hipSuccess, #hipErrorInvalidValue + + Create a new asynchronous stream. @p stream returns an opaque handle that can be used to + reference the newly created stream in subsequent hipStream* commands. The stream is allocated on + the heap and will remain allocated even if the handle goes out-of-scope. To release the memory + used by the stream, application must call hipStreamDestroy. Flags controls behavior of the + stream. See #hipStreamDefault, #hipStreamNonBlocking. + + + @see hipStreamCreate, hipStreamCreateWithPriority, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy*/ + pub fn hipStreamCreateWithFlags( + stream: *mut hipStream_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create an asynchronous stream with the specified priority. + + @param[in, out] stream Pointer to new stream + @param[in ] flags to control stream creation. + @param[in ] priority of the stream. Lower numbers represent higher priorities. + @return #hipSuccess, #hipErrorInvalidValue + + Create a new asynchronous stream with the specified priority. @p stream returns an opaque handle + that can be used to reference the newly created stream in subsequent hipStream* commands. The + stream is allocated on the heap and will remain allocated even if the handle goes out-of-scope. + To release the memory used by the stream, application must call hipStreamDestroy. Flags controls + behavior of the stream. See #hipStreamDefault, #hipStreamNonBlocking. + + + @see hipStreamCreate, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy*/ + pub fn hipStreamCreateWithPriority( + stream: *mut hipStream_t, + flags: ::core::ffi::c_uint, + priority: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns numerical values that correspond to the least and greatest stream priority. + + @param[in, out] leastPriority pointer in which value corresponding to least priority is returned. + @param[in, out] greatestPriority pointer in which value corresponding to greatest priority is returned. 
+ @returns #hipSuccess + + Returns in *leastPriority and *greatestPriority the numerical values that correspond to the least + and greatest stream priority respectively. Stream priorities follow a convention where lower numbers + imply greater priorities. The range of meaningful stream priorities is given by + [*greatestPriority, *leastPriority]. If the user attempts to create a stream with a priority value + that is outside the meaningful range as specified by this API, the priority is automatically + clamped to within the valid range.*/ + pub fn hipDeviceGetStreamPriorityRange( + leastPriority: *mut ::core::ffi::c_int, + greatestPriority: *mut ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys the specified stream. + + @param[in] stream stream identifier. + @return #hipSuccess #hipErrorInvalidHandle + + Destroys the specified stream. + + If commands are still executing on the specified stream, some may complete execution before the + queue is deleted. + + The queue may be destroyed while some commands are still inflight, or may wait for all commands + queued to the stream before destroying it. + + @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamQuery, + hipStreamWaitEvent, hipStreamSynchronize*/ + pub fn hipStreamDestroy(stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return #hipSuccess if all of the operations in the specified @p stream have completed, or + #hipErrorNotReady if not. + + @param[in] stream stream to query + + @return #hipSuccess, #hipErrorNotReady, #hipErrorInvalidHandle + + This is thread-safe and returns a snapshot of the current state of the queue. However, if other + host threads are sending work to the stream, the status may change immediately after the function + is called. It is typically used for debug. + + @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamWaitEvent, + hipStreamSynchronize, hipStreamDestroy*/ + pub fn hipStreamQuery(stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Wait for all commands in stream to complete. + + @param[in] stream stream identifier. + + @return #hipSuccess, #hipErrorInvalidHandle + + This command is host-synchronous : the host will block until the specified stream is empty. + + This command follows standard null-stream semantics. Specifically, specifying the null stream + will cause the command to wait for other streams on the same device to complete all pending + operations. + + This command honors the hipDeviceLaunchBlocking flag, which controls whether the wait is active + or blocking. + + @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamWaitEvent, + hipStreamDestroy +*/ + pub fn hipStreamSynchronize(stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Make the specified compute stream wait for an event + + @param[in] stream stream to make wait. + @param[in] event event to wait on + @param[in] flags control operation [must be 0] + + @return #hipSuccess, #hipErrorInvalidHandle + + This function inserts a wait operation into the specified stream. + All future work submitted to @p stream will wait until @p event reports completion before + beginning execution. + + This function only waits for commands in the current stream to complete. 
Notably, this function + does not implicitly wait for commands in the default stream to complete, even if the specified + stream is created with hipStreamNonBlocking = 0. + + @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamCreateWithPriority, hipStreamSynchronize, hipStreamDestroy*/ + pub fn hipStreamWaitEvent( + stream: hipStream_t, + event: hipEvent_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return flags associated with this stream. + + @param[in] stream stream to be queried + @param[in,out] flags Pointer to an unsigned integer in which the stream's flags are returned + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidHandle + + @returns #hipSuccess #hipErrorInvalidValue #hipErrorInvalidHandle + + Return flags associated with this stream in *@p flags. + + @see hipStreamCreateWithFlags*/ + pub fn hipStreamGetFlags( + stream: hipStream_t, + flags: *mut ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Query the priority of a stream. + + @param[in] stream stream to be queried + @param[in,out] priority Pointer to an unsigned integer in which the stream's priority is returned + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidHandle + + @returns #hipSuccess #hipErrorInvalidValue #hipErrorInvalidHandle + + Query the priority of a stream. The priority is returned in in priority. + + @see hipStreamCreateWithFlags*/ + pub fn hipStreamGetPriority( + stream: hipStream_t, + priority: *mut ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the device assocaited with the stream + + @param[in] stream stream to be queried + @param[out] device device associated with the stream + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorContextIsDestroyed, #hipErrorInvalidHandle, + #hipErrorNotInitialized, #hipErrorDeinitialized, #hipErrorInvalidContext + + @see hipStreamCreate, hipStreamDestroy, hipDeviceGetStreamPriorityRange*/ + pub fn hipStreamGetDevice( + stream: hipStream_t, + device: *mut hipDevice_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create an asynchronous stream with the specified CU mask. + + @param[in, out] stream Pointer to new stream + @param[in ] cuMaskSize Size of CU mask bit array passed in. + @param[in ] cuMask Bit-vector representing the CU mask. Each active bit represents using one CU. + The first 32 bits represent the first 32 CUs, and so on. If its size is greater than physical + CU number (i.e., multiProcessorCount member of hipDeviceProp_t), the extra elements are ignored. + It is user's responsibility to make sure the input is meaningful. + @return #hipSuccess, #hipErrorInvalidHandle, #hipErrorInvalidValue + + Create a new asynchronous stream with the specified CU mask. @p stream returns an opaque handle + that can be used to reference the newly created stream in subsequent hipStream* commands. The + stream is allocated on the heap and will remain allocated even if the handle goes out-of-scope. + To release the memory used by the stream, application must call hipStreamDestroy. 
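A sketch of the stream lifecycle documented above: create a stream (flags 0 = hipStreamDefault), inspect its priority, synchronize, and destroy it:

    use hip_runtime_sys::*;

    unsafe fn stream_roundtrip() {
        let mut stream: hipStream_t = std::mem::zeroed();
        let _ = hipStreamCreateWithFlags(&mut stream, 0);
        let mut priority = 0;
        let _ = hipStreamGetPriority(stream, &mut priority);
        let _ = hipStreamSynchronize(stream); // host blocks until the stream drains
        let _ = hipStreamDestroy(stream);
        println!("stream priority was {}", priority);
    }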
+ + + @see hipStreamCreate, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy*/ + pub fn hipExtStreamCreateWithCUMask( + stream: *mut hipStream_t, + cuMaskSize: u32, + cuMask: *const u32, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get CU mask associated with an asynchronous stream + + @param[in] stream stream to be queried + @param[in] cuMaskSize number of the block of memories (uint32_t *) allocated by user + @param[out] cuMask Pointer to a pre-allocated block of memories (uint32_t *) in which + the stream's CU mask is returned. The CU mask is returned in a chunck of 32 bits where + each active bit represents one active CU + @return #hipSuccess, #hipErrorInvalidHandle, #hipErrorInvalidValue + + @see hipStreamCreate, hipStreamSynchronize, hipStreamWaitEvent, hipStreamDestroy*/ + pub fn hipExtStreamGetCUMask( + stream: hipStream_t, + cuMaskSize: u32, + cuMask: *mut u32, + ) -> hipError_t; +} +/// Stream CallBack struct +pub type hipStreamCallback_t = ::core::option::Option< + unsafe extern "C" fn( + stream: hipStream_t, + status: hipError_t, + userData: *mut ::core::ffi::c_void, + ), +>; +extern "C" { + #[must_use] + /** @brief Adds a callback to be called on the host after all currently enqueued + items in the stream have completed. For each + hipStreamAddCallback call, a callback will be executed exactly once. + The callback will block later work in the stream until it is finished. + @param[in] stream - Stream to add callback to + @param[in] callback - The function to call once preceding stream operations are complete + @param[in] userData - User specified data to be passed to the callback function + @param[in] flags - Reserved for future use, must be 0 + @return #hipSuccess, #hipErrorInvalidHandle, #hipErrorNotSupported + + @see hipStreamCreate, hipStreamCreateWithFlags, hipStreamQuery, hipStreamSynchronize, + hipStreamWaitEvent, hipStreamDestroy, hipStreamCreateWithPriority +*/ + pub fn hipStreamAddCallback( + stream: hipStream_t, + callback: hipStreamCallback_t, + userData: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup StreamM Stream Memory Operations\n @{\n This section describes Stream Memory Wait and Write functions of HIP runtime API.\n/\n/**\n @brief Enqueues a wait command to the stream.[BETA]\n\n @param [in] stream - Stream identifier\n @param [in] ptr - Pointer to memory object allocated using 'hipMallocSignalMemory' flag\n @param [in] value - Value to be used in compare operation\n @param [in] flags - Defines the compare operation, supported values are hipStreamWaitValueGte\n hipStreamWaitValueEq, hipStreamWaitValueAnd and hipStreamWaitValueNor\n @param [in] mask - Mask to be applied on value at memory before it is compared with value,\n default value is set to enable every bit\n\n @returns #hipSuccess, #hipErrorInvalidValue\n\n Enqueues a wait command to the stream, all operations enqueued on this stream after this, will\n not execute until the defined wait condition is true.\n\n hipStreamWaitValueGte: waits until *ptr&mask >= value\n hipStreamWaitValueEq : waits until *ptr&mask == value\n hipStreamWaitValueAnd: waits until ((*ptr&mask) & value) != 0\n hipStreamWaitValueNor: waits until ~((*ptr&mask) | (value&mask)) != 0\n\n @note when using 
'hipStreamWaitValueNor', mask is applied on both 'value' and '*ptr'.\n\n @note Support for hipStreamWaitValue32 can be queried using 'hipDeviceGetAttribute()' and\n 'hipDeviceAttributeCanUseStreamWaitValue' flag.\n\n @warning This API is marked as beta, meaning, while this is feature complete,\n it is still open to changes and may have outstanding issues.\n\n @see hipExtMallocWithFlags, hipFree, hipStreamWaitValue64, hipStreamWriteValue64,\n hipStreamWriteValue32, hipDeviceGetAttribute"] + pub fn hipStreamWaitValue32( + stream: hipStream_t, + ptr: *mut ::core::ffi::c_void, + value: u32, + flags: ::core::ffi::c_uint, + mask: u32, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enqueues a wait command to the stream.[BETA] + + @param [in] stream - Stream identifier + @param [in] ptr - Pointer to memory object allocated using 'hipMallocSignalMemory' flag + @param [in] value - Value to be used in compare operation + @param [in] flags - Defines the compare operation, supported values are hipStreamWaitValueGte + hipStreamWaitValueEq, hipStreamWaitValueAnd and hipStreamWaitValueNor. + @param [in] mask - Mask to be applied on value at memory before it is compared with value + default value is set to enable every bit + + @returns #hipSuccess, #hipErrorInvalidValue + + Enqueues a wait command to the stream, all operations enqueued on this stream after this, will + not execute until the defined wait condition is true. + + hipStreamWaitValueGte: waits until *ptr&mask >= value + hipStreamWaitValueEq : waits until *ptr&mask == value + hipStreamWaitValueAnd: waits until ((*ptr&mask) & value) != 0 + hipStreamWaitValueNor: waits until ~((*ptr&mask) | (value&mask)) != 0 + + @note when using 'hipStreamWaitValueNor', mask is applied on both 'value' and '*ptr'. + + @note Support for hipStreamWaitValue64 can be queried using 'hipDeviceGetAttribute()' and + 'hipDeviceAttributeCanUseStreamWaitValue' flag. + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @see hipExtMallocWithFlags, hipFree, hipStreamWaitValue32, hipStreamWriteValue64, + hipStreamWriteValue32, hipDeviceGetAttribute*/ + pub fn hipStreamWaitValue64( + stream: hipStream_t, + ptr: *mut ::core::ffi::c_void, + value: u64, + flags: ::core::ffi::c_uint, + mask: u64, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enqueues a write command to the stream.[BETA] + + @param [in] stream - Stream identifier + @param [in] ptr - Pointer to a GPU accessible memory object + @param [in] value - Value to be written + @param [in] flags - reserved, ignored for now, will be used in future releases + + @returns #hipSuccess, #hipErrorInvalidValue + + Enqueues a write command to the stream, write operation is performed after all earlier commands + on this stream have completed the execution. + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
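A sketch of the host-callback mechanism declared above (hipStreamCallback_t / hipStreamAddCallback): the callback runs once after the currently enqueued work completes, and the flags argument must be 0:

    use std::ffi::c_void;
    use hip_runtime_sys::*;

    unsafe extern "C" fn on_done(_stream: hipStream_t, _status: hipError_t, user: *mut c_void) {
        println!("stream work finished, user data = {:?}", user);
    }

    unsafe fn enqueue_callback(stream: hipStream_t) {
        let _ = hipStreamAddCallback(stream, Some(on_done), std::ptr::null_mut(), 0);
    }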
+ + @see hipExtMallocWithFlags, hipFree, hipStreamWriteValue32, hipStreamWaitValue32, + hipStreamWaitValue64*/ + pub fn hipStreamWriteValue32( + stream: hipStream_t, + ptr: *mut ::core::ffi::c_void, + value: u32, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enqueues a write command to the stream.[BETA] + + @param [in] stream - Stream identifier + @param [in] ptr - Pointer to a GPU accessible memory object + @param [in] value - Value to be written + @param [in] flags - reserved, ignored for now, will be used in future releases + + @returns #hipSuccess, #hipErrorInvalidValue + + Enqueues a write command to the stream, write operation is performed after all earlier commands + on this stream have completed the execution. + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @see hipExtMallocWithFlags, hipFree, hipStreamWriteValue32, hipStreamWaitValue32, + hipStreamWaitValue64*/ + pub fn hipStreamWriteValue64( + stream: hipStream_t, + ptr: *mut ::core::ffi::c_void, + value: u64, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Event Event Management\n @{\n This section describes the event management functions of HIP runtime API.\n/\n/**\n @brief Create an event with the specified flags\n\n @param[in,out] event Returns the newly created event.\n @param[in] flags Flags to control event behavior. Valid values are #hipEventDefault,\n#hipEventBlockingSync, #hipEventDisableTiming, #hipEventInterprocess\n #hipEventDefault : Default flag. The event will use active synchronization and will support\ntiming. Blocking synchronization provides lowest possible latency at the expense of dedicating a\nCPU to poll on the event.\n #hipEventBlockingSync : The event will use blocking synchronization : if hipEventSynchronize is\ncalled on this event, the thread will block until the event completes. This can increase latency\nfor the synchroniation but can result in lower power and more resources for other CPU threads.\n #hipEventDisableTiming : Disable recording of timing information. Events created with this flag\nwould not record profiling data and provide best performance if used for synchronization.\n #hipEventInterprocess : The event can be used as an interprocess event. hipEventDisableTiming\nflag also must be set when hipEventInterprocess flag is set.\n #hipEventDisableSystemFence : Disable acquire and release system scope fence. This may\nimprove performance but device memory may not be visible to the host and other devices\nif this flag is set.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue,\n#hipErrorLaunchFailure, #hipErrorOutOfMemory\n\n @see hipEventCreate, hipEventSynchronize, hipEventDestroy, hipEventElapsedTime"] + pub fn hipEventCreateWithFlags( + event: *mut hipEvent_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** Create an event + + @param[in,out] event Returns the newly created event. 
+ + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue, + #hipErrorLaunchFailure, #hipErrorOutOfMemory + + @see hipEventCreateWithFlags, hipEventRecord, hipEventQuery, hipEventSynchronize, + hipEventDestroy, hipEventElapsedTime*/ + pub fn hipEventCreate(event: *mut hipEvent_t) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipEventRecord(event: hipEvent_t, stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroy the specified event. + + @param[in] event Event to destroy. + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue, + #hipErrorLaunchFailure + + Releases memory associated with the event. If the event is recording but has not completed + recording when hipEventDestroy() is called, the function will return immediately and the + completion_future resources will be released later, when the hipDevice is synchronized. + + @see hipEventCreate, hipEventCreateWithFlags, hipEventQuery, hipEventSynchronize, hipEventRecord, + hipEventElapsedTime + + @returns #hipSuccess*/ + pub fn hipEventDestroy(event: hipEvent_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Wait for an event to complete. + + This function will block until the event is ready, waiting for all previous work in the stream + specified when event was recorded with hipEventRecord(). + + If hipEventRecord() has not been called on @p event, this function returns #hipSuccess when no + event is captured. + + + @param[in] event Event on which to wait. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized, + #hipErrorInvalidHandle, #hipErrorLaunchFailure + + @see hipEventCreate, hipEventCreateWithFlags, hipEventQuery, hipEventDestroy, hipEventRecord, + hipEventElapsedTime*/ + pub fn hipEventSynchronize(event: hipEvent_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return the elapsed time between two events. + + @param[out] ms : Return time between start and stop in ms. + @param[in] start : Start event. + @param[in] stop : Stop event. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotReady, #hipErrorInvalidHandle, + #hipErrorNotInitialized, #hipErrorLaunchFailure + + Computes the elapsed time between two events. Time is computed in ms, with + a resolution of approximately 1 us. + + Events which are recorded in a NULL stream will block until all commands + on all other streams complete execution, and then record the timestamp. + + Events which are recorded in a non-NULL stream will record their timestamp + when they reach the head of the specified stream, after all previous + commands in that stream have completed executing. Thus the time that + the event recorded may be significantly after the host calls hipEventRecord(). + + If hipEventRecord() has not been called on either event, then #hipErrorInvalidHandle is + returned. If hipEventRecord() has been called on both events, but the timestamp has not yet been + recorded on one or both events (that is, hipEventQuery() would return #hipErrorNotReady on at + least one of the events), then #hipErrorNotReady is returned. + + @see hipEventCreate, hipEventCreateWithFlags, hipEventQuery, hipEventDestroy, hipEventRecord, + hipEventSynchronize*/ + pub fn hipEventElapsedTime( + ms: *mut f32, + start: hipEvent_t, + stop: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Query event status + + @param[in] event Event to query. 
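A sketch of event-based timing as described for hipEventElapsedTime: record start/stop events around work on a stream, synchronize the stop event, then read the elapsed milliseconds:

    use hip_runtime_sys::*;

    unsafe fn time_stream_work(stream: hipStream_t) -> f32 {
        let (mut start, mut stop): (hipEvent_t, hipEvent_t) =
            (std::mem::zeroed(), std::mem::zeroed());
        let _ = hipEventCreate(&mut start);
        let _ = hipEventCreate(&mut stop);
        let _ = hipEventRecord(start, stream);
        // ... enqueue kernels / copies on `stream` here ...
        let _ = hipEventRecord(stop, stream);
        let _ = hipEventSynchronize(stop);
        let mut ms = 0.0f32;
        let _ = hipEventElapsedTime(&mut ms, start, stop);
        let _ = hipEventDestroy(start);
        let _ = hipEventDestroy(stop);
        ms
    }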
+ @returns #hipSuccess, #hipErrorNotReady, #hipErrorInvalidHandle, #hipErrorInvalidValue, + #hipErrorNotInitialized, #hipErrorLaunchFailure + + Query the status of the specified event. This function will return #hipSuccess if all + commands in the appropriate stream (specified to hipEventRecord()) have completed. If any execution + has not completed, then #hipErrorNotReady is returned. + + @note: This API returns #hipSuccess, if hipEventRecord() is not called before this API. + + @see hipEventCreate, hipEventCreateWithFlags, hipEventRecord, hipEventDestroy, + hipEventSynchronize, hipEventElapsedTime*/ + pub fn hipEventQuery(event: hipEvent_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets information on the specified pointer.[BETA] + + @param [in] value Sets pointer attribute value + @param [in] attribute Attribute to set + @param [in] ptr Pointer to set attributes for + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipPointerSetAttribute( + value: *const ::core::ffi::c_void, + attribute: hipPointer_attribute, + ptr: hipDeviceptr_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns attributes for the specified pointer + + @param [out] attributes attributes for the specified pointer + @param [in] ptr pointer to get attributes for + + The output parameter 'attributes' has a member named 'type' that describes what memory the + pointer is associated with, such as device memory, host memory, managed memory, and others. + Otherwise, the API cannot handle the pointer and returns #hipErrorInvalidValue. + + @note The unrecognized memory type is unsupported to keep the HIP functionality backward + compatibility due to #hipMemoryType enum values. + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @note The current behavior of this HIP API corresponds to the CUDA API before version 11.0. + + @see hipPointerGetAttribute*/ + pub fn hipPointerGetAttributes( + attributes: *mut hipPointerAttribute_t, + ptr: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns information about the specified pointer.[BETA] + + @param [in, out] data Returned pointer attribute value + @param [in] attribute Attribute to query for + @param [in] ptr Pointer to get attributes for + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @see hipPointerGetAttributes*/ + pub fn hipPointerGetAttribute( + data: *mut ::core::ffi::c_void, + attribute: hipPointer_attribute, + ptr: hipDeviceptr_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns information about the specified pointer.[BETA] + + @param [in] numAttributes number of attributes to query for + @param [in] attributes attributes to query for + @param [in, out] data a two-dimensional containing pointers to memory locations + where the result of each attribute query will be written to + @param [in] ptr pointer to get attributes for + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
+ + @see hipPointerGetAttribute*/ + pub fn hipDrvPointerGetAttributes( + numAttributes: ::core::ffi::c_uint, + attributes: *mut hipPointer_attribute, + data: *mut *mut ::core::ffi::c_void, + ptr: hipDeviceptr_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = "-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup External External Resource Interoperability\n @{\n @ingroup API\n\n This section describes the external resource interoperability functions of HIP runtime API.\n\n/\n/**\n @brief Imports an external semaphore.\n\n @param[out] extSem_out External semaphores to be waited on\n @param[in] semHandleDesc Semaphore import handle descriptor\n\n @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue\n\n @see"] + pub fn hipImportExternalSemaphore( + extSem_out: *mut hipExternalSemaphore_t, + semHandleDesc: *const hipExternalSemaphoreHandleDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Signals a set of external semaphore objects. + + @param[in] extSemArray External semaphores to be waited on + @param[in] paramsArray Array of semaphore parameters + @param[in] numExtSems Number of semaphores to wait on + @param[in] stream Stream to enqueue the wait operations in + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see*/ + pub fn hipSignalExternalSemaphoresAsync( + extSemArray: *const hipExternalSemaphore_t, + paramsArray: *const hipExternalSemaphoreSignalParams, + numExtSems: ::core::ffi::c_uint, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Waits on a set of external semaphore objects + + @param[in] extSemArray External semaphores to be waited on + @param[in] paramsArray Array of semaphore parameters + @param[in] numExtSems Number of semaphores to wait on + @param[in] stream Stream to enqueue the wait operations in + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see*/ + pub fn hipWaitExternalSemaphoresAsync( + extSemArray: *const hipExternalSemaphore_t, + paramsArray: *const hipExternalSemaphoreWaitParams, + numExtSems: ::core::ffi::c_uint, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys an external semaphore object and releases any references to the underlying resource. Any outstanding signals or waits must have completed before the semaphore is destroyed. + + @param[in] extSem handle to an external memory object + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see*/ + pub fn hipDestroyExternalSemaphore(extSem: hipExternalSemaphore_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Imports an external memory object. + + @param[out] extMem_out Returned handle to an external memory object + @param[in] memHandleDesc Memory import handle descriptor + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see*/ + pub fn hipImportExternalMemory( + extMem_out: *mut hipExternalMemory_t, + memHandleDesc: *const hipExternalMemoryHandleDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Maps a buffer onto an imported memory object. 
+ + @param[out] devPtr Returned device pointer to buffer + @param[in] extMem Handle to external memory object + @param[in] bufferDesc Buffer descriptor + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see*/ + pub fn hipExternalMemoryGetMappedBuffer( + devPtr: *mut *mut ::core::ffi::c_void, + extMem: hipExternalMemory_t, + bufferDesc: *const hipExternalMemoryBufferDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys an external memory object. + + @param[in] extMem External memory object to be destroyed + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue + + @see*/ + pub fn hipDestroyExternalMemory(extMem: hipExternalMemory_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Maps a mipmapped array onto an external memory object. + + @param[out] mipmap mipmapped array to return + @param[in] extMem external memory object handle + @param[in] mipmapDesc external mipmapped array descriptor + + Returned mipmapped array must be freed using hipFreeMipmappedArray. + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidResourceHandle + + @see hipImportExternalMemory, hipDestroyExternalMemory, hipExternalMemoryGetMappedBuffer, hipFreeMipmappedArray*/ + pub fn hipExternalMemoryGetMappedMipmappedArray( + mipmap: *mut hipMipmappedArray_t, + extMem: hipExternalMemory_t, + mipmapDesc: *const hipExternalMemoryMipmappedArrayDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n @brief Allocate memory on the default accelerator\n\n @param[out] ptr Pointer to the allocated memory\n @param[in] size Requested memory size\n\n If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned.\n\n @return #hipSuccess, #hipErrorOutOfMemory, #hipErrorInvalidValue (bad context, null *ptr)\n\n @see hipMallocPitch, hipFree, hipMallocArray, hipFreeArray, hipMalloc3D, hipMalloc3DArray,\n hipHostFree, hipHostMalloc"] + pub fn hipMalloc(ptr: *mut *mut ::core::ffi::c_void, size: usize) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate memory on the default accelerator + + @param[out] ptr Pointer to the allocated memory + @param[in] sizeBytes Requested memory size + @param[in] flags Type of memory allocation + + If requested memory size is 0, no memory is allocated, *ptr returns nullptr, and #hipSuccess + is returned. + + The memory allocation flag should be either #hipDeviceMallocDefault, + #hipDeviceMallocFinegrained, #hipDeviceMallocUncached, or #hipMallocSignalMemory. + If the flag is any other value, the API returns #hipErrorInvalidValue. + + @return #hipSuccess, #hipErrorOutOfMemory, #hipErrorInvalidValue (bad context, null *ptr) + + @see hipMallocPitch, hipFree, hipMallocArray, hipFreeArray, hipMalloc3D, hipMalloc3DArray, + hipHostFree, hipHostMalloc*/ + pub fn hipExtMallocWithFlags( + ptr: *mut *mut ::core::ffi::c_void, + sizeBytes: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate pinned host memory [Deprecated] + + @param[out] ptr Pointer to the allocated host pinned memory + @param[in] size Requested memory size + + If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned. 
+ + @return #hipSuccess, #hipErrorOutOfMemory + + @warning This API is deprecated, use hipHostMalloc() instead*/ + pub fn hipMallocHost(ptr: *mut *mut ::core::ffi::c_void, size: usize) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate pinned host memory [Deprecated] + + @param[out] ptr Pointer to the allocated host pinned memory + @param[in] size Requested memory size + + If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned. + + @return #hipSuccess, #hipErrorOutOfMemory + + @warning This API is deprecated, use hipHostMalloc() instead*/ + pub fn hipMemAllocHost( + ptr: *mut *mut ::core::ffi::c_void, + size: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocates device accessible page locked (pinned) host memory + + This API allocates pinned host memory which is mapped into the address space of all GPUs + in the system, the memory can be accessed directly by the GPU device, and can be read or + written with much higher bandwidth than pageable memory obtained with functions such as + malloc(). + + Using the pinned host memory, applications can implement faster data transfers for HostToDevice + and DeviceToHost. The runtime tracks the hipHostMalloc allocations and can avoid some of the + setup required for regular unpinned memory. + + When the memory accesses are infrequent, zero-copy memory can be a good choice, for coherent + allocation. GPU can directly access the host memory over the CPU/GPU interconnect, without need + to copy the data. + + Currently the allocation granularity is 4KB for the API. + + Developers need to choose proper allocation flag with consideration of synchronization. + + @param[out] ptr Pointer to the allocated host pinned memory + @param[in] size Requested memory size in bytes + If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned. + @param[in] flags Type of host memory allocation + + If no input for flags, it will be the default pinned memory allocation on the host. 
+ + @return #hipSuccess, #hipErrorOutOfMemory + + @see hipSetDeviceFlags, hipHostFree*/ + pub fn hipHostMalloc( + ptr: *mut *mut ::core::ffi::c_void, + size: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = "-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup MemoryM Managed Memory\n\n @ingroup Memory\n @{\n This section describes the managed memory management functions of HIP runtime API.\n\n @note The managed memory management APIs are implemented on Linux, under developement\n on Windows.\n\n/\n/**\n @brief Allocates memory that will be automatically managed by HIP.\n\n This API is used for managed memory, allows data be shared and accessible to both CPU and\n GPU using a single pointer.\n\n The API returns the allocation pointer, managed by HMM, can be used further to execute kernels\n on device and fetch data between the host and device as needed.\n\n @note It is recommend to do the capability check before call this API.\n\n @param [out] dev_ptr - pointer to allocated device memory\n @param [in] size - requested allocation size in bytes, it should be granularity of 4KB\n @param [in] flags - must be either hipMemAttachGlobal or hipMemAttachHost\n (defaults to hipMemAttachGlobal)\n\n @returns #hipSuccess, #hipErrorMemoryAllocation, #hipErrorNotSupported, #hipErrorInvalidValue\n"] + pub fn hipMallocManaged( + dev_ptr: *mut *mut ::core::ffi::c_void, + size: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Prefetches memory to the specified destination device using HIP. + + @param [in] dev_ptr pointer to be prefetched + @param [in] count size in bytes for prefetching + @param [in] device destination device to prefetch to + @param [in] stream stream to enqueue prefetch operation + + @returns #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPrefetchAsync( + dev_ptr: *const ::core::ffi::c_void, + count: usize, + device: ::core::ffi::c_int, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Advise about the usage of a given memory range to HIP. + + @param [in] dev_ptr pointer to memory to set the advice for + @param [in] count size in bytes of the memory range, it should be CPU page size alligned. + @param [in] advice advice to be applied for the specified memory range + @param [in] device device to apply the advice for + + @returns #hipSuccess, #hipErrorInvalidValue + + This HIP API advises about the usage to be applied on unified memory allocation in the + range starting from the pointer address devPtr, with the size of count bytes. + The memory range must refer to managed memory allocated via the API hipMallocManaged, and the + range will be handled with proper round down and round up respectively in the driver to + be aligned to CPU page size, the same way as corresponding CUDA API behaves in CUDA version 8.0 + and afterwards. + + @note This API is implemented on Linux and is under development on Windows.*/ + pub fn hipMemAdvise( + dev_ptr: *const ::core::ffi::c_void, + count: usize, + advice: hipMemoryAdvise, + device: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Query an attribute of a given memory range in HIP. 
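// --- Illustrative sketch (not part of the generated bindings): allocate managed
// memory and prefetch it to a device, following the hipMallocManaged /
// hipMemPrefetchAsync documentation above. `flags` is expected to be
// hipMemAttachGlobal or hipMemAttachHost and is taken as a parameter rather than
// hard-coded; error handling is deliberately minimal.
pub fn example_managed_alloc_and_prefetch(
    size: usize,
    flags: ::core::ffi::c_uint,
    device: ::core::ffi::c_int,
    stream: hipStream_t,
) -> (hipError_t, *mut ::core::ffi::c_void) {
    let mut ptr: *mut ::core::ffi::c_void = ::core::ptr::null_mut();
    let status = unsafe { hipMallocManaged(&mut ptr, size, flags) };
    if !ptr.is_null() {
        // Hint the driver to migrate the range to `device` ahead of first use.
        let _ = unsafe { hipMemPrefetchAsync(ptr, size, device, stream) };
    }
    (status, ptr)
}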
+ + @param [in,out] data a pointer to a memory location where the result of each + attribute query will be written to + @param [in] data_size the size of data + @param [in] attribute the attribute to query + @param [in] dev_ptr start of the range to query + @param [in] count size of the range to query + + @returns #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemRangeGetAttribute( + data: *mut ::core::ffi::c_void, + data_size: usize, + attribute: hipMemRangeAttribute, + dev_ptr: *const ::core::ffi::c_void, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Query attributes of a given memory range in HIP. + + @param [in,out] data a two-dimensional array containing pointers to memory locations + where the result of each attribute query will be written to + @param [in] data_sizes an array, containing the sizes of each result + @param [in] attributes the attribute to query + @param [in] num_attributes an array of attributes to query (numAttributes and the number + of attributes in this array should match) + @param [in] dev_ptr start of the range to query + @param [in] count size of the range to query + + @returns #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemRangeGetAttributes( + data: *mut *mut ::core::ffi::c_void, + data_sizes: *mut usize, + attributes: *mut hipMemRangeAttribute, + num_attributes: usize, + dev_ptr: *const ::core::ffi::c_void, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Attach memory to a stream asynchronously in HIP. + + @param [in] stream - stream in which to enqueue the attach operation + @param [in] dev_ptr - pointer to memory (must be a pointer to managed memory or + to a valid host-accessible region of system-allocated memory) + @param [in] length - length of memory (defaults to zero) + @param [in] flags - must be one of hipMemAttachGlobal, hipMemAttachHost or + hipMemAttachSingle (defaults to hipMemAttachSingle) + + @returns #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipStreamAttachMemAsync( + stream: hipStream_t, + dev_ptr: *mut ::core::ffi::c_void, + length: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocates memory with stream ordered semantics + + Inserts a memory allocation operation into @p stream. + A pointer to the allocated memory is returned immediately in *dptr. + The allocation must not be accessed until the allocation operation completes. + The allocation comes from the memory pool associated with the stream's device. + + @note The default memory pool of a device contains device memory from that device. + @note Basic stream ordering allows future work submitted into the same stream to use the + allocation. Stream query, stream synchronize, and HIP events can be used to guarantee that + the allocation operation completes before work submitted in a separate stream runs. + @note During stream capture, this function results in the creation of an allocation node. + In this case, the allocation is owned by the graph instead of the memory pool. The memory + pool's properties are used to set the node's creation parameters. 
+ + @param [out] dev_ptr Returned device pointer of memory allocation + @param [in] size Number of bytes to allocate + @param [in] stream The stream establishing the stream ordering contract and + the memory pool to allocate from + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported, #hipErrorOutOfMemory + + @see hipMallocFromPoolAsync, hipFreeAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute, + hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMallocAsync( + dev_ptr: *mut *mut ::core::ffi::c_void, + size: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Frees memory with stream ordered semantics + + Inserts a free operation into @p stream. + The allocation must not be used after stream execution reaches the free. + After this API returns, accessing the memory from any subsequent work launched on the GPU + or querying its pointer attributes results in undefined behavior. + + @note During stream capture, this function results in the creation of a free node and + must therefore be passed the address of a graph allocation. + + @param [in] dev_ptr Pointer to device memory to free + @param [in] stream The stream, where the destruciton will occur according to the execution order + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipMallocFromPoolAsync, hipMallocAsync, hipMemPoolTrimTo, hipMemPoolGetAttribute, + hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipFreeAsync( + dev_ptr: *mut ::core::ffi::c_void, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Releases freed memory back to the OS + + Releases memory back to the OS until the pool contains fewer than @p min_bytes_to_keep + reserved bytes, or there is no more memory that the allocator can safely release. + The allocator cannot release OS allocations that back outstanding asynchronous allocations. + The OS allocations may happen at different granularity from the user allocations. + + @note: Allocations that have not been freed count as outstanding. + @note: Allocations that have been asynchronously freed but whose completion has + not been observed on the host (eg. by a synchronize) can count as outstanding. + + @param[in] mem_pool The memory pool to trim allocations + @param[in] min_bytes_to_hold If the pool has less than min_bytes_to_hold reserved, + then the TrimTo operation is a no-op. Otherwise the memory pool will contain + at least min_bytes_to_hold bytes reserved after the operation. + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, + hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
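// --- Illustrative sketch (not part of the generated bindings): stream-ordered
// allocation as described for hipMallocAsync/hipFreeAsync above. Both the
// allocation and the free are enqueued on `stream`; nothing here synchronizes,
// so any use of the buffer must also be ordered on the same stream.
pub fn example_stream_ordered_alloc(size: usize, stream: hipStream_t) -> hipError_t {
    let mut dev_ptr: *mut ::core::ffi::c_void = ::core::ptr::null_mut();
    let status = unsafe { hipMallocAsync(&mut dev_ptr, size, stream) };
    if dev_ptr.is_null() {
        return status;
    }
    // Work that consumes `dev_ptr` would be enqueued on `stream` here.
    unsafe { hipFreeAsync(dev_ptr, stream) }
}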
+ + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolTrimTo( + mem_pool: hipMemPool_t, + min_bytes_to_hold: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets attributes of a memory pool + + Supported attributes are: + - @p hipMemPoolAttrReleaseThreshold: (value type = cuuint64_t) + Amount of reserved memory in bytes to hold onto before trying + to release memory back to the OS. When more than the release + threshold bytes of memory are held by the memory pool, the + allocator will try to release memory back to the OS on the + next call to stream, event or context synchronize. (default 0) + - @p hipMemPoolReuseFollowEventDependencies: (value type = int) + Allow @p hipMallocAsync to use memory asynchronously freed + in another stream as long as a stream ordering dependency + of the allocating stream on the free action exists. + HIP events and null stream interactions can create the required + stream ordered dependencies. (default enabled) + - @p hipMemPoolReuseAllowOpportunistic: (value type = int) + Allow reuse of already completed frees when there is no dependency + between the free and allocation. (default enabled) + - @p hipMemPoolReuseAllowInternalDependencies: (value type = int) + Allow @p hipMallocAsync to insert new stream dependencies + in order to establish the stream ordering required to reuse + a piece of memory released by @p hipFreeAsync (default enabled). + + @param [in] mem_pool The memory pool to modify + @param [in] attr The attribute to modify + @param [in] value Pointer to the value to assign + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolSetAttribute( + mem_pool: hipMemPool_t, + attr: hipMemPoolAttr, + value: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets attributes of a memory pool + + Supported attributes are: + - @p hipMemPoolAttrReleaseThreshold: (value type = cuuint64_t) + Amount of reserved memory in bytes to hold onto before trying + to release memory back to the OS. When more than the release + threshold bytes of memory are held by the memory pool, the + allocator will try to release memory back to the OS on the + next call to stream, event or context synchronize. (default 0) + - @p hipMemPoolReuseFollowEventDependencies: (value type = int) + Allow @p hipMallocAsync to use memory asynchronously freed + in another stream as long as a stream ordering dependency + of the allocating stream on the free action exists. + HIP events and null stream interactions can create the required + stream ordered dependencies. (default enabled) + - @p hipMemPoolReuseAllowOpportunistic: (value type = int) + Allow reuse of already completed frees when there is no dependency + between the free and allocation. (default enabled) + - @p hipMemPoolReuseAllowInternalDependencies: (value type = int) + Allow @p hipMallocAsync to insert new stream dependencies + in order to establish the stream ordering required to reuse + a piece of memory released by @p hipFreeAsync (default enabled). 
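// --- Illustrative sketch (not part of the generated bindings): set a 64-bit
// memory pool attribute such as hipMemPoolAttrReleaseThreshold, whose value type
// is documented above as cuuint64_t. The attribute is passed in by the caller
// rather than naming the generated enum variant here; the value is handed over
// through a pointer, as the API expects.
pub fn example_set_pool_attribute_u64(
    pool: hipMemPool_t,
    attr: hipMemPoolAttr,
    mut value: u64,
) -> hipError_t {
    unsafe {
        hipMemPoolSetAttribute(
            pool,
            attr,
            &mut value as *mut u64 as *mut ::core::ffi::c_void,
        )
    }
}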
+ + @param [in] mem_pool The memory pool to get attributes of + @param [in] attr The attribute to get + @param [in] value Retrieved value + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolGetAttribute( + mem_pool: hipMemPool_t, + attr: hipMemPoolAttr, + value: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Controls visibility of the specified pool between devices + + @param [in] mem_pool Memory pool for acccess change + @param [in] desc_list Array of access descriptors. Each descriptor instructs the access to enable for a single gpu + @param [in] count Number of descriptors in the map array. + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolSetAccess( + mem_pool: hipMemPool_t, + desc_list: *const hipMemAccessDesc, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the accessibility of a pool from a device + + Returns the accessibility of the pool's memory from the specified location. + + @param [out] flags Accessibility of the memory pool from the specified location/device + @param [in] mem_pool Memory pool being queried + @param [in] location Location/device for memory pool access + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolGetAccess( + flags: *mut hipMemAccessFlags, + mem_pool: hipMemPool_t, + location: *mut hipMemLocation, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memory pool + + Creates a HIP memory pool and returns the handle in @p mem_pool. The @p pool_props determines + the properties of the pool such as the backing device and IPC capabilities. + + By default, the memory pool will be accessible from the device it is allocated on. + + @param [out] mem_pool Contains createed memory pool + @param [in] pool_props Memory pool properties + + @note Specifying hipMemHandleTypeNone creates a memory pool that will not support IPC. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, hipMemPoolDestroy, + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
+ + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolCreate( + mem_pool: *mut hipMemPool_t, + pool_props: *const hipMemPoolProps, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys the specified memory pool + + If any pointers obtained from this pool haven't been freed or + the pool has free operations that haven't completed + when @p hipMemPoolDestroy is invoked, the function will return immediately and the + resources associated with the pool will be released automatically + once there are no more outstanding allocations. + + Destroying the current mempool of a device sets the default mempool of + that device as the current mempool for that device. + + @param [in] mem_pool Memory pool for destruction + + @note A device's default memory pool cannot be destroyed. + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipMallocFromPoolAsync, hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, hipMemPoolCreate + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolDestroy(mem_pool: hipMemPool_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocates memory from a specified pool with stream ordered semantics. + + Inserts an allocation operation into @p stream. + A pointer to the allocated memory is returned immediately in @p dev_ptr. + The allocation must not be accessed until the allocation operation completes. + The allocation comes from the specified memory pool. + + @note The specified memory pool may be from a device different than that of the specified @p stream. + + Basic stream ordering allows future work submitted into the same stream to use the allocation. + Stream query, stream synchronize, and HIP events can be used to guarantee that the allocation + operation completes before work submitted in a separate stream runs. + + @note During stream capture, this function results in the creation of an allocation node. In this case, + the allocation is owned by the graph instead of the memory pool. The memory pool's properties + are used to set the node's creation parameters. + + @param [out] dev_ptr Returned device pointer + @param [in] size Number of bytes to allocate + @param [in] mem_pool The pool to allocate from + @param [in] stream The stream establishing the stream ordering semantic + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported, #hipErrorOutOfMemory + + @see hipMallocAsync, hipFreeAsync, hipMemPoolGetAttribute, hipMemPoolCreate + hipMemPoolTrimTo, hipDeviceSetMemPool, hipMemPoolSetAttribute, hipMemPoolSetAccess, hipMemPoolGetAccess, + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMallocFromPoolAsync( + dev_ptr: *mut *mut ::core::ffi::c_void, + size: usize, + mem_pool: hipMemPool_t, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Exports a memory pool to the requested handle type. + + Given an IPC capable mempool, create an OS handle to share the pool with another process. 
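// --- Illustrative sketch (not part of the generated bindings): create an explicit
// memory pool, allocate from it with stream ordering, then destroy it. The caller
// supplies a fully initialized hipMemPoolProps; the sketch assumes hipMemPool_t is
// the raw handle pointer typedef from the HIP headers and elides mapping
// hipError_t values to a Rust error type.
pub fn example_pool_alloc_free(
    props: *const hipMemPoolProps,
    size: usize,
    stream: hipStream_t,
) -> hipError_t {
    let mut pool: hipMemPool_t = ::core::ptr::null_mut();
    let create_status = unsafe { hipMemPoolCreate(&mut pool, props) };
    if pool.is_null() {
        return create_status;
    }
    let mut dev_ptr: *mut ::core::ffi::c_void = ::core::ptr::null_mut();
    let _ = unsafe { hipMallocFromPoolAsync(&mut dev_ptr, size, pool, stream) };
    if !dev_ptr.is_null() {
        let _ = unsafe { hipFreeAsync(dev_ptr, stream) };
    }
    // Destruction is deferred by the runtime until outstanding allocations drain.
    unsafe { hipMemPoolDestroy(pool) }
}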
+ A recipient process can convert the shareable handle into a mempool with @p hipMemPoolImportFromShareableHandle. + Individual pointers can then be shared with the @p hipMemPoolExportPointer and @p hipMemPoolImportPointer APIs. + The implementation of what the shareable handle is and how it can be transferred is defined by the requested + handle type. + + @note: To create an IPC capable mempool, create a mempool with a @p hipMemAllocationHandleType other + than @p hipMemHandleTypeNone. + + @param [out] shared_handle Pointer to the location in which to store the requested handle + @param [in] mem_pool Pool to export + @param [in] handle_type The type of handle to create + @param [in] flags Must be 0 + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory + + @see hipMemPoolImportFromShareableHandle + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolExportToShareableHandle( + shared_handle: *mut ::core::ffi::c_void, + mem_pool: hipMemPool_t, + handle_type: hipMemAllocationHandleType, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Imports a memory pool from a shared handle. + + Specific allocations can be imported from the imported pool with @p hipMemPoolImportPointer. + + @note Imported memory pools do not support creating new allocations. + As such imported memory pools may not be used in @p hipDeviceSetMemPool + or @p hipMallocFromPoolAsync calls. + + @param [out] mem_pool Returned memory pool + @param [in] shared_handle OS handle of the pool to open + @param [in] handle_type The type of handle being imported + @param [in] flags Must be 0 + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory + + @see hipMemPoolExportToShareableHandle + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolImportFromShareableHandle( + mem_pool: *mut hipMemPool_t, + shared_handle: *mut ::core::ffi::c_void, + handle_type: hipMemAllocationHandleType, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Export data to share a memory pool allocation between processes. + + Constructs @p export_data for sharing a specific allocation from an already shared memory pool. + The recipient process can import the allocation with the @p hipMemPoolImportPointer api. + The data is not a handle and may be shared through any IPC mechanism. + + @param[out] export_data Returned export data + @param[in] dev_ptr Pointer to memory being exported + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory + + @see hipMemPoolImportPointer + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolExportPointer( + export_data: *mut hipMemPoolPtrExportData, + dev_ptr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Import a memory pool allocation from another process. + + Returns in @p dev_ptr a pointer to the imported memory. 
+ The imported memory must not be accessed before the allocation operation completes + in the exporting process. The imported memory must be freed from all importing processes before + being freed in the exporting process. The pointer may be freed with @p hipFree + or @p hipFreeAsync. If @p hipFreeAsync is used, the free must be completed + on the importing process before the free operation on the exporting process. + + @note The @p hipFreeAsync api may be used in the exporting process before + the @p hipFreeAsync operation completes in its stream as long as the + @p hipFreeAsync in the exporting process specifies a stream with + a stream dependency on the importing process's @p hipFreeAsync. + + @param [out] dev_ptr Pointer to imported memory + @param [in] mem_pool Memory pool from which to import a pointer + @param [in] export_data Data specifying the memory to import + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized, #hipErrorOutOfMemory + + @see hipMemPoolExportPointer + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemPoolImportPointer( + dev_ptr: *mut *mut ::core::ffi::c_void, + mem_pool: hipMemPool_t, + export_data: *mut hipMemPoolPtrExportData, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate device accessible page locked host memory [Deprecated] + + @param[out] ptr Pointer to the allocated host pinned memory + @param[in] size Requested memory size in bytes + @param[in] flags Type of host memory allocation + + If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned. + + @return #hipSuccess, #hipErrorOutOfMemory + + @warning This API is deprecated, use hipHostMalloc() instead*/ + pub fn hipHostAlloc( + ptr: *mut *mut ::core::ffi::c_void, + size: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get Device pointer from Host Pointer allocated through hipHostMalloc + + @param[out] devPtr Device Pointer mapped to passed host pointer + @param[in] hstPtr Host Pointer allocated through hipHostMalloc + @param[in] flags Flags to be passed for extension + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorOutOfMemory + + @see hipSetDeviceFlags, hipHostMalloc*/ + pub fn hipHostGetDevicePointer( + devPtr: *mut *mut ::core::ffi::c_void, + hstPtr: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return flags associated with host pointer + + @param[out] flagsPtr Memory location to store flags + @param[in] hostPtr Host Pointer allocated through hipHostMalloc + @return #hipSuccess, #hipErrorInvalidValue + + @see hipHostMalloc*/ + pub fn hipHostGetFlags( + flagsPtr: *mut ::core::ffi::c_uint, + hostPtr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Register host memory so it can be accessed from the current device. + + @param[out] hostPtr Pointer to host memory to be registered. + @param[in] sizeBytes Size of the host memory + @param[in] flags See below. + + Flags: + - #hipHostRegisterDefault Memory is Mapped and Portable + - #hipHostRegisterPortable Memory is considered registered by all contexts. HIP only supports + one context so this is always assumed true. + - #hipHostRegisterMapped Map the allocation into the address space for the current device. 
+ The device pointer can be obtained with #hipHostGetDevicePointer. + + + After registering the memory, use #hipHostGetDevicePointer to obtain the mapped device pointer. + On many systems, the mapped device pointer will have a different value than the mapped host + pointer. Applications must use the device pointer in device code, and the host pointer in host + code. + + On some systems, registered memory is pinned. On some systems, registered memory may not + actually be pinned but uses OS or hardware facilities to allow GPU access to the host memory. + + Developers are strongly encouraged to register memory blocks which are aligned to the host + cache-line size (typically 64 bytes, but it can be obtained from the CPUID instruction). + + If registering non-aligned pointers, the application must take care when registering pointers from + the same cache line on different devices. HIP's coarse-grained synchronization model does not + guarantee correct results if different devices write to different parts of the same cache block - + typically one of the writes will "win" and overwrite data from the other registered memory + region. + + @return #hipSuccess, #hipErrorOutOfMemory + + @see hipHostUnregister, hipHostGetFlags, hipHostGetDevicePointer*/ + pub fn hipHostRegister( + hostPtr: *mut ::core::ffi::c_void, + sizeBytes: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Un-register host pointer + + @param[in] hostPtr Host pointer previously registered with #hipHostRegister + @return Error code + + @see hipHostRegister*/ + pub fn hipHostUnregister(hostPtr: *mut ::core::ffi::c_void) -> hipError_t; +} +extern "C" { + #[must_use] + /** Allocates at least width (in bytes) * height bytes of linear memory + Padding may occur to ensure alignment requirements are met for the given row + The change in width size due to padding will be returned in *pitch. + Currently the alignment is set to 128 bytes + + @param[out] ptr Pointer to the allocated device memory + @param[out] pitch Pitch for allocation (in bytes) + @param[in] width Requested pitched allocation width (in bytes) + @param[in] height Requested pitched allocation height + + If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned. + + @return Error code + + @see hipMalloc, hipFree, hipMallocArray, hipFreeArray, hipHostFree, hipMalloc3D, + hipMalloc3DArray, hipHostMalloc*/ + pub fn hipMallocPitch( + ptr: *mut *mut ::core::ffi::c_void, + pitch: *mut usize, + width: usize, + height: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** Allocates at least width (in bytes) * height bytes of linear memory + Padding may occur to ensure alignment requirements are met for the given row + The change in width size due to padding will be returned in *pitch. + Currently the alignment is set to 128 bytes + + @param[out] dptr Pointer to the allocated device memory + @param[out] pitch Pitch for allocation (in bytes) + @param[in] widthInBytes Requested pitched allocation width (in bytes) + @param[in] height Requested pitched allocation height + @param[in] elementSizeBytes The size of element bytes, should be 4, 8 or 16 + + If size is 0, no memory is allocated, *ptr returns nullptr, and hipSuccess is returned. + The intended usage of pitch is as a separate parameter of the allocation, used to compute addresses within the 2D array.
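// --- Illustrative sketch (not part of the generated bindings): register an
// existing host buffer for device access, query the mapped device pointer, then
// unregister. `flags` would typically be hipHostRegisterMapped (optionally
// combined with hipHostRegisterPortable) and is taken as a parameter rather than
// hard-coded; the flags argument of hipHostGetDevicePointer is passed as 0, which
// is assumed to be the default.
pub fn example_register_host_buffer(
    host: *mut ::core::ffi::c_void,
    size_bytes: usize,
    flags: ::core::ffi::c_uint,
) -> (hipError_t, *mut ::core::ffi::c_void) {
    let reg_status = unsafe { hipHostRegister(host, size_bytes, flags) };
    let mut dev_ptr: *mut ::core::ffi::c_void = ::core::ptr::null_mut();
    let _ = unsafe { hipHostGetDevicePointer(&mut dev_ptr, host, 0) };
    // ... any device work using `dev_ptr` must complete before unregistering ...
    let _ = unsafe { hipHostUnregister(host) };
    (reg_status, dev_ptr)
}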
+ Given the row and column of an array element of type T, the address is computed as: + T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column; + + @return Error code + + @see hipMalloc, hipFree, hipMallocArray, hipFreeArray, hipHostFree, hipMalloc3D, + hipMalloc3DArray, hipHostMalloc*/ + pub fn hipMemAllocPitch( + dptr: *mut hipDeviceptr_t, + pitch: *mut usize, + widthInBytes: usize, + height: usize, + elementSizeBytes: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Free memory allocated by the hcc hip memory allocation API. + This API performs an implicit hipDeviceSynchronize() call. + If pointer is NULL, the hip runtime is initialized and hipSuccess is returned. + + @param[in] ptr Pointer to memory to be freed + @return #hipSuccess + @return #hipErrorInvalidDevicePointer (if pointer is invalid, including host pointers allocated + with hipHostMalloc) + + @see hipMalloc, hipMallocPitch, hipMallocArray, hipFreeArray, hipHostFree, hipMalloc3D, + hipMalloc3DArray, hipHostMalloc*/ + pub fn hipFree(ptr: *mut ::core::ffi::c_void) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Free memory allocated by the hcc hip host memory allocation API [Deprecated] + + @param[in] ptr Pointer to memory to be freed + @return #hipSuccess, + #hipErrorInvalidValue (if pointer is invalid, including device pointers allocated + with hipMalloc) + + @warning This API is deprecated, use hipHostFree() instead*/ + pub fn hipFreeHost(ptr: *mut ::core::ffi::c_void) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Free memory allocated by the hcc hip host memory allocation API + This API performs an implicit hipDeviceSynchronize() call. + If pointer is NULL, the hip runtime is initialized and hipSuccess is returned. + + @param[in] ptr Pointer to memory to be freed + @return #hipSuccess, + #hipErrorInvalidValue (if pointer is invalid, including device pointers allocated with + hipMalloc) + + @see hipMalloc, hipMallocPitch, hipFree, hipMallocArray, hipFreeArray, hipMalloc3D, + hipMalloc3DArray, hipHostMalloc*/ + pub fn hipHostFree(ptr: *mut ::core::ffi::c_void) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from src to dst. + + It supports memory from host to device, + device to host, device to device and host to host + The src and dst must not overlap. + + For hipMemcpy, the copy is always performed by the current device (set by hipSetDevice). + For multi-gpu or peer-to-peer configurations, it is recommended to set the current device to the + device where the src data is physically located. For optimal peer-to-peer copies, the copy device + must be able to access the src and dst pointers (by calling hipDeviceEnablePeerAccess with copy + agent as the current device and src/dest as the peerDevice argument. if this is not done, the + hipMemcpy will still work, but will perform the copy using a staging buffer on the host. + Calling hipMemcpy with dst and src pointers that do not match the hipMemcpyKind results in + undefined behavior. 
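// --- Illustrative sketch (not part of the generated bindings): a pitched 2D
// allocation plus the row/column address arithmetic described above. The element
// type is fixed to f32 purely for the example.
pub fn example_pitched_alloc(
    width_elems: usize,
    height: usize,
) -> (hipError_t, *mut ::core::ffi::c_void, usize) {
    let mut base: *mut ::core::ffi::c_void = ::core::ptr::null_mut();
    let mut pitch: usize = 0;
    let width_bytes = width_elems * ::core::mem::size_of::<f32>();
    let status = unsafe { hipMallocPitch(&mut base, &mut pitch, width_bytes, height) };
    (status, base, pitch)
}

// Address of element (row, col) in a pitched allocation:
// element_ptr = base + row * pitch + col * size_of::<T>()
pub fn example_pitched_element_ptr(
    base: *mut ::core::ffi::c_void,
    pitch: usize,
    row: usize,
    col: usize,
) -> *mut f32 {
    let offset = row * pitch + col * ::core::mem::size_of::<f32>();
    unsafe { (base as *mut u8).add(offset) as *mut f32 }
}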
+ + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + @param[in] kind Kind of transfer + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpy( + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Memory copy on the stream. + It allows single or multiple devices to do memory copy on single or multiple streams. + + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + @param[in] kind Kind of transfer + @param[in] stream Valid stream + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown, #hipErrorContextIsDestroyed + + @see hipMemcpy, hipStreamCreate, hipStreamSynchronize, hipStreamDestroy, hipSetDevice, hipLaunchKernelGGL +*/ + pub fn hipMemcpyWithStream( + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from Host to Device + + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyHtoD( + dst: hipDeviceptr_t, + src: *mut ::core::ffi::c_void, + sizeBytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from Device to Host + + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyDtoH( + dst: *mut ::core::ffi::c_void, + src: hipDeviceptr_t, + sizeBytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from Device to Device + + @param[out] dst Data 
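// --- Illustrative sketch (not part of the generated bindings): explicit
// host-to-device and device-to-host copies through the driver-style entry points
// declared above, which sidesteps choosing a hipMemcpyKind value. `dst` is a
// device allocation of at least `data.len()` bytes supplied by the caller.
pub fn example_roundtrip_bytes(dst: hipDeviceptr_t, data: &mut [u8]) -> (hipError_t, hipError_t) {
    let n = data.len();
    let h2d = unsafe { hipMemcpyHtoD(dst, data.as_mut_ptr() as *mut ::core::ffi::c_void, n) };
    let d2h = unsafe { hipMemcpyDtoH(data.as_mut_ptr() as *mut ::core::ffi::c_void, dst, n) };
    (h2d, d2h)
}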
being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyDtoD( + dst: hipDeviceptr_t, + src: hipDeviceptr_t, + sizeBytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies from one 1D array to device memory. + + @param[out] dstDevice Destination device pointer + @param[in] srcArray Source array + @param[in] srcOffset Offset in bytes of source array + @param[in] ByteCount Size of memory copy in bytes + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyAtoD( + dstDevice: hipDeviceptr_t, + srcArray: hipArray_t, + srcOffset: usize, + ByteCount: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies from device memory to a 1D array. + + @param[out] dstArray Destination array + @param[in] dstOffset Offset in bytes of destination array + @param[in] srcDevice Source device pointer + @param[in] ByteCount Size of memory copy in bytes + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyDtoA( + dstArray: hipArray_t, + dstOffset: usize, + srcDevice: hipDeviceptr_t, + ByteCount: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies from one 1D array to another. 
+ + @param[out] dstArray Destination array + @param[in] dstOffset Offset in bytes of destination array + @param[in] srcArray Source array + @param[in] srcOffset Offset in bytes of source array + @param[in] ByteCount Size of memory copy in bytes + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyAtoA( + dstArray: hipArray_t, + dstOffset: usize, + srcArray: hipArray_t, + srcOffset: usize, + ByteCount: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from Host to Device asynchronously + + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + @param[in] stream Stream identifier + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyHtoDAsync( + dst: hipDeviceptr_t, + src: *mut ::core::ffi::c_void, + sizeBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from Device to Host asynchronously + + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + @param[in] stream Stream identifier + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyDtoHAsync( + dst: *mut ::core::ffi::c_void, + src: hipDeviceptr_t, + sizeBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from Device to Device asynchronously + + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + @param[in] stream Stream identifier + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, 
hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyDtoDAsync( + dst: hipDeviceptr_t, + src: hipDeviceptr_t, + sizeBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies from one 1D array to host memory. + + @param[out] dstHost Destination pointer + @param[in] srcArray Source array + @param[in] srcOffset Offset in bytes of source array + @param[in] ByteCount Size of memory copy in bytes + @param[in] stream Stream identifier + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyAtoHAsync( + dstHost: *mut ::core::ffi::c_void, + srcArray: hipArray_t, + srcOffset: usize, + ByteCount: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies from host memory to a 1D array. + + @param[out] dstArray Destination array + @param[in] dstOffset Offset in bytes of destination array + @param[in] srcHost Source host pointer + @param[in] ByteCount Size of memory copy in bytes + @param[in] stream Stream identifier + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, hipMemAllocHost, + hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, hipMemcpyAtoA, + hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, hipMemcpyDtoA, hipMemcpyDtoD, + hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, hipMemcpyHtoA, hipMemcpyHtoAAsync, + hipMemcpyHtoDAsync, hipMemFree, hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, + hipMemHostAlloc, hipMemHostGetDevicePointer*/ + pub fn hipMemcpyHtoAAsync( + dstArray: hipArray_t, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a global pointer from a module. + Returns in *dptr and *bytes the pointer and size of the global of name name located in module hmod. + If no variable of that name exists, it returns hipErrorNotFound. Both parameters dptr and bytes are optional. + If one of them is NULL, it is ignored and hipSuccess is returned. 
+ + @param[out] dptr Returns global device pointer + @param[out] bytes Returns global size in bytes + @param[in] hmod Module to retrieve global from + @param[in] name Name of global to retrieve + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotFound, #hipErrorInvalidContext +*/ + pub fn hipModuleGetGlobal( + dptr: *mut hipDeviceptr_t, + bytes: *mut usize, + hmod: hipModule_t, + name: *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets device pointer associated with symbol on the device. + + @param[out] devPtr pointer to the device associated the symbole + @param[in] symbol pointer to the symbole of the device + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetSymbolAddress( + devPtr: *mut *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the size of the given symbol on the device. + + @param[in] symbol pointer to the device symbole + @param[out] size pointer to the size + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetSymbolSize( + size: *mut usize, + symbol: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the pointer of requested HIP driver function. + + @param[in] symbol The Symbol name of the driver function to request. + @param[out] pfn Output pointer to the requested driver function. + @param[in] hipVersion The HIP version for the requested driver function symbol. + HIP version is defined as 100*version_major + version_minor. For example, in HIP 6.1, the + hipversion is 601, for the symbol function "hipGetDeviceProperties", the specified hipVersion 601 + is greater or equal to the version 600, the symbol function will be handle properly as backend + compatible function. + + @param[in] flags Currently only default flag is suppported. + @param[out] symbolStatus Optional enumeration for returned status of searching for symbol driver + function based on the input hipVersion. + + Returns hipSuccess if the returned pfn is addressed to the pointer of found driver function. + + @return #hipSuccess, #hipErrorInvalidValue.*/ + pub fn hipGetProcAddress( + symbol: *const ::core::ffi::c_char, + pfn: *mut *mut ::core::ffi::c_void, + hipVersion: ::core::ffi::c_int, + flags: u64, + symbolStatus: *mut hipDriverProcAddressQueryResult, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data to the given symbol on the device. + Symbol HIP APIs allow a kernel to define a device-side data symbol which can be accessed on + the host side. The symbol can be in __constant or device space. + Note that the symbol name needs to be encased in the HIP_SYMBOL macro. + This also applies to hipMemcpyFromSymbol, hipGetSymbolAddress, and hipGetSymbolSize. + For detailed usage, see the + memcpyToSymbol example + in the HIP Porting Guide. + + + @param[out] symbol pointer to the device symbole + @param[in] src pointer to the source address + @param[in] sizeBytes size in bytes to copy + @param[in] offset offset in bytes from start of symbole + @param[in] kind type of memory transfer + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipMemcpyToSymbol( + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data to the given symbol on the device asynchronously. 
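// --- Illustrative sketch (not part of the generated bindings): look up a device
// global in a loaded module, as documented for hipModuleGetGlobal above. `name`
// must be a NUL-terminated C string owned by the caller; the returned pointer and
// size are only meaningful when the status indicates success.
pub fn example_lookup_module_global(
    module: hipModule_t,
    name: *const ::core::ffi::c_char,
) -> (hipError_t, hipDeviceptr_t, usize) {
    let mut dptr: hipDeviceptr_t = unsafe { ::core::mem::zeroed() };
    let mut bytes: usize = 0;
    let status = unsafe { hipModuleGetGlobal(&mut dptr, &mut bytes, module, name) };
    (status, dptr, bytes)
}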
+ + @param[out] symbol pointer to the device symbole + @param[in] src pointer to the source address + @param[in] sizeBytes size in bytes to copy + @param[in] offset offset in bytes from start of symbole + @param[in] kind type of memory transfer + @param[in] stream stream identifier + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipMemcpyToSymbolAsync( + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data from the given symbol on the device. + + @param[out] dst Returns pointer to destinition memory address + @param[in] symbol Pointer to the symbole address on the device + @param[in] sizeBytes Size in bytes to copy + @param[in] offset Offset in bytes from the start of symbole + @param[in] kind Type of memory transfer + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipMemcpyFromSymbol( + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data from the given symbol on the device asynchronously. + + @param[out] dst Returns pointer to destinition memory address + @param[in] symbol pointer to the symbole address on the device + @param[in] sizeBytes size in bytes to copy + @param[in] offset offset in bytes from the start of symbole + @param[in] kind type of memory transfer + @param[in] stream stream identifier + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipMemcpyFromSymbolAsync( + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copy data from src to dst asynchronously. + + @warning If host or dest are not pinned, the memory copy will be performed synchronously. For + best performance, use hipHostMalloc to allocate host memory that is transferred asynchronously. + + @warning on HCC hipMemcpyAsync does not support overlapped H2D and D2H copies. + For hipMemcpy, the copy is always performed by the device associated with the specified stream. + + For multi-gpu or peer-to-peer configurations, it is recommended to use a stream which is a + attached to the device where the src data is physically located. For optimal peer-to-peer copies, + the copy device must be able to access the src and dst pointers (by calling + hipDeviceEnablePeerAccess with copy agent as the current device and src/dest as the peerDevice + argument. if this is not done, the hipMemcpy will still work, but will perform the copy using a + staging buffer on the host. 
+ + @param[out] dst Data being copy to + @param[in] src Data being copy from + @param[in] sizeBytes Data size in bytes + @param[in] kind Type of memory transfer + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown + + @see hipMemcpy, hipMemcpy2D, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, + hipMemcpy2DFromArray, hipMemcpyArrayToArray, hipMemcpy2DArrayToArray, hipMemcpyToSymbol, + hipMemcpyFromSymbol, hipMemcpy2DAsync, hipMemcpyToArrayAsync, hipMemcpy2DToArrayAsync, + hipMemcpyFromArrayAsync, hipMemcpy2DFromArrayAsync, hipMemcpyToSymbolAsync, + hipMemcpyFromSymbolAsync*/ + pub fn hipMemcpyAsync( + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant + byte value value. + + @param[out] dst Data being filled + @param[in] value Value to be set + @param[in] sizeBytes Data size in bytes + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized*/ + pub fn hipMemset( + dst: *mut ::core::ffi::c_void, + value: ::core::ffi::c_int, + sizeBytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant + byte value value. + + @param[out] dest Data ptr to be filled + @param[in] value Value to be set + @param[in] count Number of values to be set + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized*/ + pub fn hipMemsetD8( + dest: hipDeviceptr_t, + value: ::core::ffi::c_uchar, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant + byte value value. + + hipMemsetD8Async() is asynchronous with respect to the host, so the call may return before the + memset is complete. The operation can optionally be associated to a stream by passing a non-zero + stream argument. If stream is non-zero, the operation may overlap with operations in other + streams. + + @param[out] dest Data ptr to be filled + @param[in] value Constant value to be set + @param[in] count Number of values to be set + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized*/ + pub fn hipMemsetD8Async( + dest: hipDeviceptr_t, + value: ::core::ffi::c_uchar, + count: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant + short value value. + + @param[out] dest Data ptr to be filled + @param[in] value Constant value to be set + @param[in] count Number of values to be set + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized*/ + pub fn hipMemsetD16( + dest: hipDeviceptr_t, + value: ::core::ffi::c_ushort, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the first sizeBytes bytes of the memory area pointed to by dest with the constant + short value value. + + hipMemsetD16Async() is asynchronous with respect to the host, so the call may return before the + memset is complete. The operation can optionally be associated to a stream by passing a non-zero + stream argument. If stream is non-zero, the operation may overlap with operations in other + streams. 
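// --- Illustrative sketch (not part of the generated bindings): fill a device
// buffer with a byte pattern and read it back, both enqueued on the same stream so
// the copy observes the memset. The caller must synchronize `stream` before
// inspecting `host_out`.
pub fn example_fill_and_readback(
    dev: hipDeviceptr_t,
    host_out: &mut [u8],
    value: u8,
    stream: hipStream_t,
) -> (hipError_t, hipError_t) {
    let n = host_out.len();
    let fill = unsafe { hipMemsetD8Async(dev, value, n, stream) };
    let copy = unsafe {
        hipMemcpyDtoHAsync(host_out.as_mut_ptr() as *mut ::core::ffi::c_void, dev, n, stream)
    };
    (fill, copy)
}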
+ + @param[out] dest Data ptr to be filled + @param[in] value Constant value to be set + @param[in] count Number of values to be set + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized*/ + pub fn hipMemsetD16Async( + dest: hipDeviceptr_t, + value: ::core::ffi::c_ushort, + count: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the memory area pointed to by dest with the constant integer + value for specified number of times. + + @param[out] dest Data being filled + @param[in] value Constant value to be set + @param[in] count Number of values to be set + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized*/ + pub fn hipMemsetD32( + dest: hipDeviceptr_t, + value: ::core::ffi::c_int, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the first sizeBytes bytes of the memory area pointed to by dev with the constant + byte value value. + + hipMemsetAsync() is asynchronous with respect to the host, so the call may return before the + memset is complete. The operation can optionally be associated to a stream by passing a non-zero + stream argument. If stream is non-zero, the operation may overlap with operations in other + streams. + + @param[out] dst Pointer to device memory + @param[in] value Value to set for each byte of specified memory + @param[in] sizeBytes Size in bytes to set + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipMemsetAsync( + dst: *mut ::core::ffi::c_void, + value: ::core::ffi::c_int, + sizeBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the memory area pointed to by dev with the constant integer + value for specified number of times. + + hipMemsetD32Async() is asynchronous with respect to the host, so the call may return before the + memset is complete. The operation can optionally be associated to a stream by passing a non-zero + stream argument. If stream is non-zero, the operation may overlap with operations in other + streams. + + @param[out] dst Pointer to device memory + @param[in] value Value to set for each byte of specified memory + @param[in] count Number of values to be set + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipMemsetD32Async( + dst: hipDeviceptr_t, + value: ::core::ffi::c_int, + count: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills the memory area pointed to by dst with the constant value. + + @param[out] dst Pointer to device memory + @param[in] pitch Data size in bytes + @param[in] value Constant value to be set + @param[in] width + @param[in] height + @return #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipMemset2D( + dst: *mut ::core::ffi::c_void, + pitch: usize, + value: ::core::ffi::c_int, + width: usize, + height: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills asynchronously the memory area pointed to by dst with the constant value. 
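// Editorial sketch (not part of the generated bindings): clearing a pitched 2D allocation with
// hipMemset2D as declared above. The pointer and pitch are assumed to come from hipMallocPitch;
// only the first `width` bytes of each of the `height` rows are written.
unsafe fn clear_pitched(dst: *mut core::ffi::c_void, pitch: usize, width: usize, height: usize) {
    let status = hipMemset2D(dst, pitch, 0, width, height);
    // `status` should be the generated success variant of hipError_t.
    let _ = status;
}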
+ + @param[in] dst Pointer to 2D device memory + @param[in] pitch Pitch size in bytes + @param[in] value Value to be set for each byte of specified memory + @param[in] width Width of matrix set columns in bytes + @param[in] height Height of matrix set rows in bytes + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipMemset2DAsync( + dst: *mut ::core::ffi::c_void, + pitch: usize, + value: ::core::ffi::c_int, + width: usize, + height: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills synchronously the memory area pointed to by pitchedDevPtr with the constant value. + + @param[in] pitchedDevPtr Pointer to pitched device memory + @param[in] value Value to set for each byte of specified memory + @param[in] extent Size parameters for width field in bytes in device memory + @return #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipMemset3D( + pitchedDevPtr: hipPitchedPtr, + value: ::core::ffi::c_int, + extent: hipExtent, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Fills asynchronously the memory area pointed to by pitchedDevPtr with the constant value. + + @param[in] pitchedDevPtr Pointer to pitched device memory + @param[in] value Value to set for each byte of specified memory + @param[in] extent Size parameters for width field in bytes in device memory + @param[in] stream Stream identifier + @return #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipMemset3DAsync( + pitchedDevPtr: hipPitchedPtr, + value: ::core::ffi::c_int, + extent: hipExtent, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Query memory info. + + On ROCM, this function gets the actual free memory left on the current device, so supports + the cases while running multi-workload (such as multiple processes, multiple threads, and + multiple GPUs). + + @warning On Windows, the free memory only accounts for memory allocated by this process and may + be optimistic. + + @param[out] free Returns free memory on the current device in bytes + @param[out] total Returns total allocatable memory on the current device in bytes + + @return #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue +*/ + pub fn hipMemGetInfo(free: *mut usize, total: *mut usize) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get allocated memory size via memory pointer. + + This function gets the allocated shared virtual memory size from memory pointer. + + @param[in] ptr Pointer to allocated memory + @param[out] size Returns the allocated memory size in bytes + + @return #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipMemPtrGetInfo( + ptr: *mut ::core::ffi::c_void, + size: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate an array on the device. + + @param[out] array Pointer to allocated array in device memory + @param[in] desc Requested channel format + @param[in] width Requested array allocation width + @param[in] height Requested array allocation height + @param[in] flags Requested properties of allocated array + @return #hipSuccess, #hipErrorOutOfMemory + + @see hipMalloc, hipMallocPitch, hipFree, hipFreeArray, hipHostMalloc, hipHostFree*/ + pub fn hipMallocArray( + array: *mut hipArray_t, + desc: *const hipChannelFormatDesc, + width: usize, + height: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create an array memory pointer on the device. 
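// Editorial sketch (not part of the generated bindings): querying free and total memory on the
// current device through hipMemGetInfo as declared above.
unsafe fn device_memory_usage() -> (usize, usize) {
    let (mut free, mut total) = (0usize, 0usize);
    let status = hipMemGetInfo(&mut free, &mut total);
    // On success `free` is at most `total`; `status` should match the generated success variant.
    let _ = status;
    (free, total)
}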
+ + @param[out] pHandle Pointer to the array memory + @param[in] pAllocateArray Requested array desciptor + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipMallocArray, hipArrayDestroy, hipFreeArray*/ + pub fn hipArrayCreate( + pHandle: *mut hipArray_t, + pAllocateArray: *const HIP_ARRAY_DESCRIPTOR, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroy an array memory pointer on the device. + + @param[in] array Pointer to the array memory + + @return #hipSuccess, #hipErrorInvalidValue + + @see hipArrayCreate, hipArrayDestroy, hipFreeArray*/ + pub fn hipArrayDestroy(array: hipArray_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create a 3D array memory pointer on the device. + + @param[out] array Pointer to the 3D array memory + @param[in] pAllocateArray Requested array desciptor + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipMallocArray, hipArrayDestroy, hipFreeArray*/ + pub fn hipArray3DCreate( + array: *mut hipArray_t, + pAllocateArray: *const HIP_ARRAY3D_DESCRIPTOR, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create a 3D memory pointer on the device. + + @param[out] pitchedDevPtr Pointer to the 3D memory + @param[in] extent Requested extent + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @see hipMallocPitch, hipMemGetInfo, hipFree*/ + pub fn hipMalloc3D( + pitchedDevPtr: *mut hipPitchedPtr, + extent: hipExtent, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Frees an array on the device. + + @param[in] array Pointer to array to free + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorNotInitialized + + @see hipMalloc, hipMallocPitch, hipFree, hipMallocArray, hipHostMalloc, hipHostFree*/ + pub fn hipFreeArray(array: hipArray_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate an array on the device. + + @param[out] array Pointer to allocated array in device memory + @param[in] desc Requested channel format + @param[in] extent Requested array allocation width, height and depth + @param[in] flags Requested properties of allocated array + @return #hipSuccess, #hipErrorOutOfMemory + + @see hipMalloc, hipMallocPitch, hipFree, hipFreeArray, hipHostMalloc, hipHostFree*/ + pub fn hipMalloc3DArray( + array: *mut hipArray_t, + desc: *const hipChannelFormatDesc, + extent: hipExtent, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets info about the specified array + + @param[out] desc - Returned array type + @param[out] extent - Returned array shape. 
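// Editorial sketch (not part of the generated bindings): allocating pitched 3D memory with
// hipMalloc3D and clearing it with hipMemset3D, per the declarations above. hipExtent's width
// is in bytes for pitched linear memory; the field names and the use of hipFree (declared
// elsewhere in these bindings) to release the allocation are assumptions from the HIP headers.
unsafe fn alloc_and_clear_3d() -> hipPitchedPtr {
    let extent = hipExtent { width: 256, height: 64, depth: 4 };
    let mut pitched = core::mem::zeroed::<hipPitchedPtr>();
    let _ = hipMalloc3D(&mut pitched, extent);
    let _ = hipMemset3D(pitched, 0, extent);
    // Release later with hipFree on the returned pointer.
    pitched
}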
2D arrays will have depth of zero + @param[out] flags - Returned array flags + @param[in] array - The HIP array to get info for + + @return #hipSuccess, #hipErrorInvalidValue #hipErrorInvalidHandle + + @see hipArrayGetDescriptor, hipArray3DGetDescriptor*/ + pub fn hipArrayGetInfo( + desc: *mut hipChannelFormatDesc, + extent: *mut hipExtent, + flags: *mut ::core::ffi::c_uint, + array: hipArray_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets a 1D or 2D array descriptor + + @param[out] pArrayDescriptor - Returned array descriptor + @param[in] array - Array to get descriptor of + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue #hipErrorInvalidHandle + + @see hipArray3DCreate, hipArray3DGetDescriptor, hipArrayCreate, hipArrayDestroy, hipMemAlloc, + hipMemAllocHost, hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, + hipMemcpy3D, hipMemcpy3DAsync, hipMemcpyAtoA, hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, + hipMemcpyDtoA, hipMemcpyDtoD, hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, + hipMemcpyHtoA, hipMemcpyHtoAAsync, hipMemcpyHtoD, hipMemcpyHtoDAsync, hipMemFree, + hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, hipMemHostAlloc, + hipMemHostGetDevicePointer, hipMemsetD8, hipMemsetD16, hipMemsetD32, hipArrayGetInfo*/ + pub fn hipArrayGetDescriptor( + pArrayDescriptor: *mut HIP_ARRAY_DESCRIPTOR, + array: hipArray_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets a 3D array descriptor + + @param[out] pArrayDescriptor - Returned 3D array descriptor + @param[in] array - 3D array to get descriptor of + + @return #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidValue #hipErrorInvalidHandle, #hipErrorContextIsDestroyed + + @see hipArray3DCreate, hipArrayCreate, hipArrayDestroy, hipArrayGetDescriptor, hipMemAlloc, + hipMemAllocHost, hipMemAllocPitch, hipMemcpy2D, hipMemcpy2DAsync, hipMemcpy2DUnaligned, + hipMemcpy3D, hipMemcpy3DAsync, hipMemcpyAtoA, hipMemcpyAtoD, hipMemcpyAtoH, hipMemcpyAtoHAsync, + hipMemcpyDtoA, hipMemcpyDtoD, hipMemcpyDtoDAsync, hipMemcpyDtoH, hipMemcpyDtoHAsync, + hipMemcpyHtoA, hipMemcpyHtoAAsync, hipMemcpyHtoD, hipMemcpyHtoDAsync, hipMemFree, + hipMemFreeHost, hipMemGetAddressRange, hipMemGetInfo, hipMemHostAlloc, + hipMemHostGetDevicePointer, hipMemsetD8, hipMemsetD16, hipMemsetD32, hipArrayGetInfo*/ + pub fn hipArray3DGetDescriptor( + pArrayDescriptor: *mut HIP_ARRAY3D_DESCRIPTOR, + array: hipArray_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] dpitch Pitch of destination memory + @param[in] src Source memory address + @param[in] spitch Pitch of source memory + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2D( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies memory for 2D arrays. 
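// Editorial sketch (not part of the generated bindings): copying a tightly-packed host matrix
// into a pitched device allocation with hipMemcpy2D as declared above. `row_bytes`/`rows`
// describe the logical matrix; `dpitch` is the device row stride returned by hipMallocPitch.
unsafe fn upload_matrix(
    dev: *mut core::ffi::c_void,
    dpitch: usize,
    host: *const core::ffi::c_void,
    row_bytes: usize,
    rows: usize,
) {
    let _ = hipMemcpy2D(
        dev, dpitch,       // destination and its pitch
        host, row_bytes,   // source and its pitch (tightly packed)
        row_bytes, rows,   // width in bytes, height in rows
        hipMemcpyKind::hipMemcpyHostToDevice,
    );
}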
+ @param[in] pCopy Parameters for the memory copy + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2D, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, + hipMemcpyToSymbol, hipMemcpyAsync*/ + pub fn hipMemcpyParam2D(pCopy: *const hip_Memcpy2D) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies memory for 2D arrays. + @param[in] pCopy Parameters for the memory copy + @param[in] stream Stream to use + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2D, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, + hipMemcpyToSymbol, hipMemcpyAsync*/ + pub fn hipMemcpyParam2DAsync( + pCopy: *const hip_Memcpy2D, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] dpitch Pitch of destination memory + @param[in] src Source memory address + @param[in] spitch Pitch of source memory + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + @param[in] stream Stream to use + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpyToArray, hipMemcpy2DToArray, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2DAsync( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] wOffset Destination starting X offset + @param[in] hOffset Destination starting Y offset + @param[in] src Source memory address + @param[in] spitch Pitch of source memory + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpyToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2DToArray( + dst: hipArray_t, + wOffset: usize, + hOffset: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. 
+ + @param[in] dst Destination memory address + @param[in] wOffset Destination starting X offset + @param[in] hOffset Destination starting Y offset + @param[in] src Source memory address + @param[in] spitch Pitch of source memory + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + @param[in] stream Accelerator view which the copy is being enqueued + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpyToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2DToArrayAsync( + dst: hipArray_t, + wOffset: usize, + hOffset: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] wOffsetDst Destination starting X offset + @param[in] hOffsetDst Destination starting Y offset + @param[in] src Source memory address + @param[in] wOffsetSrc Source starting X offset + @param[in] hOffsetSrc Source starting Y offset (columns in bytes) + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpyToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2DArrayToArray( + dst: hipArray_t, + wOffsetDst: usize, + hOffsetDst: usize, + src: hipArray_const_t, + wOffsetSrc: usize, + hOffsetSrc: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] wOffset Destination starting X offset + @param[in] hOffset Destination starting Y offset + @param[in] src Source memory address + @param[in] count size in bytes to copy + @param[in] kind Type of transfer + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync + @warning This API is deprecated.*/ + pub fn hipMemcpyToArray( + dst: hipArray_t, + wOffset: usize, + hOffset: usize, + src: *const ::core::ffi::c_void, + count: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. 
+ + @param[in] dst Destination memory address + @param[in] srcArray Source memory address + @param[in] wOffset Source starting X offset + @param[in] hOffset Source starting Y offset + @param[in] count Size in bytes to copy + @param[in] kind Type of transfer + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync + @warning This API is deprecated.*/ + pub fn hipMemcpyFromArray( + dst: *mut ::core::ffi::c_void, + srcArray: hipArray_const_t, + wOffset: usize, + hOffset: usize, + count: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] dpitch Pitch of destination memory + @param[in] src Source memory address + @param[in] wOffset Source starting X offset + @param[in] hOffset Source starting Y offset + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2DFromArray( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: hipArray_const_t, + wOffset: usize, + hOffset: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device asynchronously. + + @param[in] dst Destination memory address + @param[in] dpitch Pitch of destination memory + @param[in] src Source memory address + @param[in] wOffset Source starting X offset + @param[in] hOffset Source starting Y offset + @param[in] width Width of matrix transfer (columns in bytes) + @param[in] height Height of matrix transfer (rows) + @param[in] kind Type of transfer + @param[in] stream Accelerator view which the copy is being enqueued + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy2DFromArrayAsync( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: hipArray_const_t, + wOffset: usize, + hOffset: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] dst Destination memory address + @param[in] srcArray Source array + @param[in] srcOffset Offset in bytes of source array + @param[in] count Size of memory copy in bytes + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpyAtoH( + dst: *mut ::core::ffi::c_void, + srcArray: hipArray_t, + srcOffset: usize, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. 
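// Editorial sketch (not part of the generated bindings): reading the first `count` bytes of a
// HIP array back to host memory with hipMemcpyAtoH as declared above. `array` is assumed to
// come from hipArrayCreate or hipMallocArray.
unsafe fn array_to_host(array: hipArray_t, host: *mut core::ffi::c_void, count: usize) {
    let status = hipMemcpyAtoH(host, array, /* srcOffset */ 0, count);
    // `status` should match the generated success variant of hipError_t.
    let _ = status;
}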
+ + @param[in] dstArray Destination memory address + @param[in] dstOffset Offset in bytes of destination array + @param[in] srcHost Source host pointer + @param[in] count Size of memory copy in bytes + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpyHtoA( + dstArray: hipArray_t, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] p 3D memory copy parameters + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy3D(p: *const hipMemcpy3DParms) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device asynchronously. + + @param[in] p 3D memory copy parameters + @param[in] stream Stream to use + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipMemcpy3DAsync( + p: *const hipMemcpy3DParms, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device. + + @param[in] pCopy 3D memory copy parameters + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipDrvMemcpy3D(pCopy: *const HIP_MEMCPY3D) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies data between host and device asynchronously. 
+ + @param[in] pCopy 3D memory copy parameters + @param[in] stream Stream to use + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidPitchValue, + #hipErrorInvalidDevicePointer, #hipErrorInvalidMemcpyDirection + + @see hipMemcpy, hipMemcpy2DToArray, hipMemcpy2D, hipMemcpyFromArray, hipMemcpyToSymbol, + hipMemcpyAsync*/ + pub fn hipDrvMemcpy3DAsync( + pCopy: *const HIP_MEMCPY3D, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup PeerToPeer PeerToPeer Device Memory Access\n @{\n @warning PeerToPeer support is experimental.\n This section describes the PeerToPeer device memory access functions of HIP runtime API.\n/\n/**\n @brief Determine if a device can access a peer's memory.\n\n @param [out] canAccessPeer Returns the peer access capability (0 or 1)\n @param [in] deviceId - device from where memory may be accessed.\n @param [in] peerDeviceId - device where memory is physically located\n\n Returns \"1\" in @p canAccessPeer if the specified @p device is capable\n of directly accessing memory physically located on peerDevice , or \"0\" if not.\n\n Returns \"0\" in @p canAccessPeer if deviceId == peerDeviceId, and both are valid devices : a\n device is not a peer of itself.\n\n @returns #hipSuccess,\n @returns #hipErrorInvalidDevice if deviceId or peerDeviceId are not valid devices"] + pub fn hipDeviceCanAccessPeer( + canAccessPeer: *mut ::core::ffi::c_int, + deviceId: ::core::ffi::c_int, + peerDeviceId: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enable direct access from current device's virtual address space to memory allocations + physically located on a peer device. + + Memory which already allocated on peer device will be mapped into the address space of the + current device. In addition, all future memory allocations on peerDeviceId will be mapped into + the address space of the current device when the memory is allocated. The peer memory remains + accessible from the current device until a call to hipDeviceDisablePeerAccess or hipDeviceReset. + + + @param [in] peerDeviceId Peer device to enable direct access to from the current device + @param [in] flags Reserved for future use, must be zero + + Returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue, + @returns #hipErrorPeerAccessAlreadyEnabled if peer access is already enabled for this device.*/ + pub fn hipDeviceEnablePeerAccess( + peerDeviceId: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Disable direct access from current device's virtual address space to memory allocations + physically located on a peer device. + + Returns hipErrorPeerAccessNotEnabled if direct access to memory on peerDevice has not yet been + enabled from the current device. + + @param [in] peerDeviceId Peer device to disable direct access to + + @returns #hipSuccess, #hipErrorPeerAccessNotEnabled*/ + pub fn hipDeviceDisablePeerAccess(peerDeviceId: ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get information on memory allocations. 
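// Editorial sketch (not part of the generated bindings): enabling peer access from the current
// device to `peer` only when hipDeviceCanAccessPeer reports it is possible, per the
// declarations above. Calling enable twice yields hipErrorPeerAccessAlreadyEnabled.
unsafe fn enable_peer_if_possible(current: i32, peer: i32) -> bool {
    let mut can_access = 0i32;
    let _ = hipDeviceCanAccessPeer(&mut can_access, current, peer);
    if can_access == 1 {
        // `flags` is reserved and must be zero.
        let _ = hipDeviceEnablePeerAccess(peer, 0);
        true
    } else {
        false
    }
}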
+ + @param [out] pbase - BAse pointer address + @param [out] psize - Size of allocation + @param [in] dptr- Device Pointer + + @returns #hipSuccess, #hipErrorNotFound + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice*/ + pub fn hipMemGetAddressRange( + pbase: *mut hipDeviceptr_t, + psize: *mut usize, + dptr: hipDeviceptr_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies memory from one device to memory on another device. + + @param [out] dst - Destination device pointer. + @param [in] dstDeviceId - Destination device + @param [in] src - Source device pointer + @param [in] srcDeviceId - Source device + @param [in] sizeBytes - Size of memory copy in bytes + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDevice*/ + pub fn hipMemcpyPeer( + dst: *mut ::core::ffi::c_void, + dstDeviceId: ::core::ffi::c_int, + src: *const ::core::ffi::c_void, + srcDeviceId: ::core::ffi::c_int, + sizeBytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies memory from one device to memory on another device. + + @param [out] dst - Destination device pointer. + @param [in] dstDeviceId - Destination device + @param [in] src - Source device pointer + @param [in] srcDevice - Source device + @param [in] sizeBytes - Size of memory copy in bytes + @param [in] stream - Stream identifier + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDevice*/ + pub fn hipMemcpyPeerAsync( + dst: *mut ::core::ffi::c_void, + dstDeviceId: ::core::ffi::c_int, + src: *const ::core::ffi::c_void, + srcDevice: ::core::ffi::c_int, + sizeBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create a context and set it as current/default context + + @param [out] ctx Context to create + @param [in] flags Context creation flags + @param [in] device device handle + + @return #hipSuccess + + @see hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, hipCtxPushCurrent, + hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform. +*/ + pub fn hipCtxCreate( + ctx: *mut hipCtx_t, + flags: ::core::ffi::c_uint, + device: hipDevice_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroy a HIP context. + + @param [in] ctx Context to destroy + + @returns #hipSuccess, #hipErrorInvalidValue + + @see hipCtxCreate, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent,hipCtxSetCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize , hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxDestroy(ctx: hipCtx_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Pop the current/default context and return the popped context. 
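// Editorial sketch (not part of the generated bindings): a device-to-device copy across GPUs
// with hipMemcpyPeer as declared above. When direct peer access is unavailable, the runtime
// may stage the copy through host memory, as noted for hipMemcpy earlier in this header.
unsafe fn copy_between_devices(
    dst: *mut core::ffi::c_void,
    dst_device: i32,
    src: *const core::ffi::c_void,
    src_device: i32,
    size_bytes: usize,
) {
    let _ = hipMemcpyPeer(dst, dst_device, src, src_device, size_bytes);
}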
+ + @param [out] ctx The current context to pop + + @returns #hipSuccess, #hipErrorInvalidContext + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxSetCurrent, hipCtxGetCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxPopCurrent(ctx: *mut hipCtx_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Push the context to be set as current/ default context + + @param [in] ctx The current context to push + + @returns #hipSuccess, #hipErrorInvalidContext + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize , hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxPushCurrent(ctx: hipCtx_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set the passed context as current/default + + @param [in] ctx The context to set as current + + @returns #hipSuccess, #hipErrorInvalidContext + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize , hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxSetCurrent(ctx: hipCtx_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the handle of the current/ default context + + @param [out] ctx The context to get as current + + @returns #hipSuccess, #hipErrorInvalidContext + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetDevice, hipCtxGetFlags, hipCtxPopCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxGetCurrent(ctx: *mut hipCtx_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the handle of the device associated with current/default context + + @param [out] device The device from the current context + + @returns #hipSuccess, #hipErrorInvalidContext + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxGetDevice(device: *mut hipDevice_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the approximate HIP api version. + + @param [in] ctx Context to check + @param [out] apiVersion API version to get + + @return #hipSuccess + + @warning The HIP feature set does not correspond to an exact CUDA SDK api revision. + This function always set *apiVersion to 4 as an approximation though HIP supports + some features which were introduced in later CUDA SDK revisions. + HIP apps code should not rely on the api revision number here and should + use arch feature flags to test device capabilities or conditional compilation. 
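// Editorial sketch (not part of the generated bindings): the deprecated driver-style context
// stack exposed above, shown only for parity with the cuCtx* API. `ctx` is assumed to come
// from hipCtxCreate or hipDevicePrimaryCtxRetain, and the handle types are assumed to be the
// usual raw-pointer aliases.
unsafe fn with_context(ctx: hipCtx_t) -> hipDevice_t {
    let _ = hipCtxPushCurrent(ctx);
    let mut device: hipDevice_t = 0;
    let _ = hipCtxGetDevice(&mut device);   // device backing the now-current context
    let mut popped: hipCtx_t = core::ptr::null_mut();
    let _ = hipCtxPopCurrent(&mut popped);  // restores the previously current context
    device
}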
+ + @see hipCtxCreate, hipCtxDestroy, hipCtxGetDevice, hipCtxGetFlags, hipCtxPopCurrent, + hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxGetApiVersion( + ctx: hipCtx_t, + apiVersion: *mut ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get Cache configuration for a specific function + + @param [out] cacheConfig Cache configuration + + @return #hipSuccess + + @warning AMD devices and some Nvidia GPUS do not support reconfigurable cache. This hint is + ignored on those architectures. + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxGetCacheConfig(cacheConfig: *mut hipFuncCache_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set L1/Shared cache partition. + + @param [in] cacheConfig Cache configuration to set + + @return #hipSuccess + + @warning AMD devices and some Nvidia GPUS do not support reconfigurable cache. This hint is + ignored on those architectures. + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxSetCacheConfig(cacheConfig: hipFuncCache_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set Shared memory bank configuration. + + @param [in] config Shared memory configuration to set + + @return #hipSuccess + + @warning AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is + ignored on those architectures. + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxSetSharedMemConfig(config: hipSharedMemConfig) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get Shared memory bank configuration. + + @param [out] pConfig Pointer of shared memory configuration + + @return #hipSuccess + + @warning AMD devices and some Nvidia GPUS do not support shared cache banking, and the hint is + ignored on those architectures. + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxGetSharedMemConfig(pConfig: *mut hipSharedMemConfig) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Blocks until the default context has completed all preceding requested tasks. + + @return #hipSuccess + + @warning This function waits for all streams on the default context to complete execution, and + then returns. 
+ + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxSynchronize() -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Return flags used for creating default context. + + @param [out] flags Pointer of flags + + @returns #hipSuccess + + @see hipCtxCreate, hipCtxDestroy, hipCtxPopCurrent, hipCtxGetCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxGetFlags(flags: *mut ::core::ffi::c_uint) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enables direct access to memory allocations in a peer context. + + Memory which already allocated on peer device will be mapped into the address space of the + current device. In addition, all future memory allocations on peerDeviceId will be mapped into + the address space of the current device when the memory is allocated. The peer memory remains + accessible from the current device until a call to hipDeviceDisablePeerAccess or hipDeviceReset. + + + @param [in] peerCtx Peer context + @param [in] flags flags, need to set as 0 + + @returns #hipSuccess, #hipErrorInvalidDevice, #hipErrorInvalidValue, + #hipErrorPeerAccessAlreadyEnabled + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + @warning PeerToPeer support is experimental. + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxEnablePeerAccess( + peerCtx: hipCtx_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Disable direct access from current context's virtual address space to memory allocations + physically located on a peer context.Disables direct access to memory allocations in a peer + context and unregisters any registered allocations. + + Returns #hipErrorPeerAccessNotEnabled if direct access to memory on peerDevice has not yet been + enabled from the current device. + + @param [in] peerCtx Peer context to be disabled + + @returns #hipSuccess, #hipErrorPeerAccessNotEnabled + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + @warning PeerToPeer support is experimental. + + @warning This API is deprecated on the AMD platform, only for equivalent cuCtx driver API on the + NVIDIA platform.*/ + pub fn hipCtxDisablePeerAccess(peerCtx: hipCtx_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the state of the primary context. 
+ + @param [in] dev Device to get primary context flags for + @param [out] flags Pointer to store flags + @param [out] active Pointer to store context state; 0 = inactive, 1 = active + + @returns #hipSuccess + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent driver API on the + NVIDIA platform.*/ + pub fn hipDevicePrimaryCtxGetState( + dev: hipDevice_t, + flags: *mut ::core::ffi::c_uint, + active: *mut ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Release the primary context on the GPU. + + @param [in] dev Device which primary context is released + + @returns #hipSuccess + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + @warning This function return #hipSuccess though doesn't release the primaryCtx by design on + HIP/HCC path. + + @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA + platform.*/ + pub fn hipDevicePrimaryCtxRelease(dev: hipDevice_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Retain the primary context on the GPU. + + @param [out] pctx Returned context handle of the new context + @param [in] dev Device which primary context is released + + @returns #hipSuccess + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA + platform.*/ + pub fn hipDevicePrimaryCtxRetain( + pctx: *mut hipCtx_t, + dev: hipDevice_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Resets the primary context on the GPU. + + @param [in] dev Device which primary context is reset + + @returns #hipSuccess + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA + platform.*/ + pub fn hipDevicePrimaryCtxReset(dev: hipDevice_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set flags for the primary context. 
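// Editorial sketch (not part of the generated bindings): the retain/release pattern for the
// primary context, per the declarations above. Every successful retain should be balanced by a
// release; `dev` is a device ordinal assumed to come from hipDeviceGet.
unsafe fn with_primary_ctx(dev: hipDevice_t) {
    let mut ctx: hipCtx_t = core::ptr::null_mut();
    let _ = hipDevicePrimaryCtxRetain(&mut ctx, dev);
    // ... use `ctx`, e.g. via hipCtxPushCurrent / hipCtxPopCurrent ...
    let _ = hipDevicePrimaryCtxRelease(dev);
}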
+ + @param [in] dev Device for which the primary context flags are set + @param [in] flags New flags for the device + + @returns #hipSuccess, #hipErrorContextAlreadyInUse + + @see hipCtxCreate, hipCtxDestroy, hipCtxGetFlags, hipCtxPopCurrent, hipCtxGetCurrent, + hipCtxSetCurrent, hipCtxPushCurrent, hipCtxSetCacheConfig, hipCtxSynchronize, hipCtxGetDevice + + @warning This API is deprecated on the AMD platform, only for equivalent driver API on the NVIDIA + platform.*/ + pub fn hipDevicePrimaryCtxSetFlags( + dev: hipDevice_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n\n @defgroup Module Module Management\n @{\n @ingroup API\n This section describes the module management functions of HIP runtime API.\n\n/\n/**\n @brief Loads code object from file into a module the currrent context.\n\n @param [in] fname Filename of code object to load\n\n @param [out] module Module\n\n @warning File/memory resources allocated in this function are released only in hipModuleUnload.\n\n @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext, #hipErrorFileNotFound,\n #hipErrorOutOfMemory, #hipErrorSharedObjectInitFailed, #hipErrorNotInitialized\n"] + pub fn hipModuleLoad( + module: *mut hipModule_t, + fname: *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Frees the module + + @param [in] module Module to free + + @returns #hipSuccess, #hipErrorInvalidResourceHandle + + The module is freed, and the code objects associated with it are destroyed.*/ + pub fn hipModuleUnload(module: hipModule_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Function with kname will be extracted if present in module + + @param [in] module Module to get function from + @param [in] kname Pointer to the name of function + @param [out] function Pointer to function handle + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidContext, #hipErrorNotInitialized, + #hipErrorNotFound,*/ + pub fn hipModuleGetFunction( + function: *mut hipFunction_t, + module: hipModule_t, + kname: *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Find out attributes for a given function. + + @param [out] attr Attributes of funtion + @param [in] func Pointer to the function handle + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDeviceFunction*/ + pub fn hipFuncGetAttributes( + attr: *mut hipFuncAttributes, + func: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Find out a specific attribute for a given function. + + @param [out] value Pointer to the value + @param [in] attrib Attributes of the given funtion + @param [in] hfunc Function to get attributes from + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDeviceFunction*/ + pub fn hipFuncGetAttribute( + value: *mut ::core::ffi::c_int, + attrib: hipFunction_attribute, + hfunc: hipFunction_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets pointer to device entry function that matches entry function symbolPtr. 
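// Editorial sketch (not part of the generated bindings): loading a code object from disk and
// looking up a kernel in it with the module API declared above. The file name and kernel name
// are placeholders; both must be NUL-terminated C strings, and the handle types are assumed to
// be raw-pointer aliases.
unsafe fn load_kernel() -> (hipModule_t, hipFunction_t) {
    let mut module: hipModule_t = core::ptr::null_mut();
    let _ = hipModuleLoad(&mut module, c"kernels.hsaco".as_ptr());
    let mut func: hipFunction_t = core::ptr::null_mut();
    let _ = hipModuleGetFunction(&mut func, module, c"vector_add".as_ptr());
    // Pair with hipModuleUnload once the module is no longer needed.
    (module, func)
}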
+ + @param [out] functionPtr Device entry function + @param [in] symbolPtr Pointer to device entry function to search for + + @returns #hipSuccess, #hipErrorInvalidDeviceFunction +*/ + pub fn hipGetFuncBySymbol( + functionPtr: *mut hipFunction_t, + symbolPtr: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief returns the handle of the texture reference with the name from the module. + + @param [in] hmod Module + @param [in] name Pointer of name of texture reference + @param [out] texRef Pointer of texture reference + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorNotFound, #hipErrorInvalidValue*/ + pub fn hipModuleGetTexRef( + texRef: *mut *mut textureReference, + hmod: hipModule_t, + name: *const ::core::ffi::c_char, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief builds module from code object which resides in host memory. Image is pointer to that + location. + + @param [in] image The pointer to the location of data + @param [out] module Retuned module + + @returns hipSuccess, hipErrorNotInitialized, hipErrorOutOfMemory, hipErrorNotInitialized*/ + pub fn hipModuleLoadData( + module: *mut hipModule_t, + image: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief builds module from code object which resides in host memory. Image is pointer to that + location. Options are not used. hipModuleLoadData is called. + + @param [in] image The pointer to the location of data + @param [out] module Retuned module + @param [in] numOptions Number of options + @param [in] options Options for JIT + @param [in] optionValues Option values for JIT + + @returns hipSuccess, hipErrorNotInitialized, hipErrorOutOfMemory, hipErrorNotInitialized*/ + pub fn hipModuleLoadDataEx( + module: *mut hipModule_t, + image: *const ::core::ffi::c_void, + numOptions: ::core::ffi::c_uint, + options: *mut hipJitOption, + optionValues: *mut *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief launches kernel f with launch parameters and shared memory on stream with arguments passed + to kernelparams or extra + + @param [in] f Kernel to launch. + @param [in] gridDimX X grid dimension specified as multiple of blockDimX. + @param [in] gridDimY Y grid dimension specified as multiple of blockDimY. + @param [in] gridDimZ Z grid dimension specified as multiple of blockDimZ. + @param [in] blockDimX X block dimensions specified in work-items + @param [in] blockDimY Y grid dimension specified in work-items + @param [in] blockDimZ Z grid dimension specified in work-items + @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. The + HIP-Clang compiler provides support for extern shared declarations. + @param [in] stream Stream where the kernel should be dispatched. May be 0, in which case th + default stream is used with associated synchronization rules. + @param [in] kernelParams Kernel parameters to launch + @param [in] extra Pointer to kernel arguments. These are passed directly to the kernel and + must be in the memory layout and alignment expected by the kernel. + All passed arguments must be naturally aligned according to their type. The memory address of each + argument should be a multiple of its size in bytes. Please refer to hip_porting_driver_api.md + for sample usage. + + Please note, HIP does not support kernel launch with total work items defined in dimension with + size gridDim x blockDim >= 2^32. 
So gridDim.x * blockDim.x, gridDim.y * blockDim.y + and gridDim.z * blockDim.z are always less than 2^32. + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue*/ + pub fn hipModuleLaunchKernel( + f: hipFunction_t, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + stream: hipStream_t, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief launches kernel f with launch parameters and shared memory on stream with arguments passed + to kernelParams, where thread blocks can cooperate and synchronize as they execute + + @param [in] f Kernel to launch. + @param [in] gridDimX X grid dimension specified as multiple of blockDimX. + @param [in] gridDimY Y grid dimension specified as multiple of blockDimY. + @param [in] gridDimZ Z grid dimension specified as multiple of blockDimZ. + @param [in] blockDimX X block dimension specified in work-items. + @param [in] blockDimY Y block dimension specified in work-items. + @param [in] blockDimZ Z block dimension specified in work-items. + @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. The + HIP-Clang compiler provides support for extern shared declarations. + @param [in] stream Stream where the kernel should be dispatched. May be 0, + in which case the default stream is used with associated synchronization rules. + @param [in] kernelParams A list of kernel arguments. + + Please note, HIP does not support kernel launch with total work items defined in dimension with + size gridDim x blockDim >= 2^32. + + @returns #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidHandle, #hipErrorInvalidImage, #hipErrorInvalidValue, + #hipErrorInvalidConfiguration, #hipErrorLaunchFailure, #hipErrorLaunchOutOfResources, + #hipErrorLaunchTimeOut, #hipErrorCooperativeLaunchTooLarge, #hipErrorSharedObjectInitFailed*/ + pub fn hipModuleLaunchCooperativeKernel( + f: hipFunction_t, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + stream: hipStream_t, + kernelParams: *mut *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Launches kernels on multiple devices where thread blocks can cooperate and + synchronize as they execute. + + @param [in] launchParamsList List of launch parameters, one per device. + @param [in] numDevices Size of the launchParamsList array. + @param [in] flags Flags to control launch behavior. 
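// Editorial sketch (not part of the generated bindings): launching a module kernel through the
// kernelParams path of hipModuleLaunchKernel as declared above. `func` comes from
// hipModuleGetFunction; the kernel is assumed to take a single device-pointer argument, and
// hipDeviceptr_t is assumed to be the usual raw void* alias.
unsafe fn launch_1d(func: hipFunction_t, mut buf: hipDeviceptr_t, n: u32, stream: hipStream_t) {
    // kernelParams is an array of pointers, one per kernel argument value.
    let mut params: [*mut core::ffi::c_void; 1] = [(&mut buf as *mut hipDeviceptr_t).cast()];
    let _ = hipModuleLaunchKernel(
        func,
        (n + 255) / 256, 1, 1,  // grid dimensions in blocks
        256, 1, 1,              // block dimensions in work-items
        0,                      // dynamic shared memory in bytes
        stream,                 // null selects the default stream
        params.as_mut_ptr(),    // kernelParams
        core::ptr::null_mut(),  // extra: unused when kernelParams is given
    );
}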
+ + @returns #hipSuccess, #hipErrorDeinitialized, #hipErrorNotInitialized, #hipErrorInvalidContext, + #hipErrorInvalidHandle, #hipErrorInvalidImage, #hipErrorInvalidValue, + #hipErrorInvalidConfiguration, #hipErrorInvalidResourceHandle, #hipErrorLaunchFailure, + #hipErrorLaunchOutOfResources, #hipErrorLaunchTimeOut, #hipErrorCooperativeLaunchTooLarge, + #hipErrorSharedObjectInitFailed*/ + pub fn hipModuleLaunchCooperativeKernelMultiDevice( + launchParamsList: *mut hipFunctionLaunchParams, + numDevices: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief launches kernel f with launch parameters and shared memory on stream with arguments passed + to kernelparams or extra, where thread blocks can cooperate and synchronize as they execute + + @param [in] f Kernel to launch. + @param [in] gridDim Grid dimensions specified as multiple of blockDim. + @param [in] blockDimX Block dimensions specified in work-items + @param [in] kernelParams A list of kernel arguments + @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. The + HIP-Clang compiler provides support for extern shared declarations. + @param [in] stream Stream where the kernel should be dispatched. May be 0, in which case th + default stream is used with associated synchronization rules. + + Please note, HIP does not support kernel launch with total work items defined in dimension with + size gridDim x blockDim >= 2^32. + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue, #hipErrorCooperativeLaunchTooLarge*/ + pub fn hipLaunchCooperativeKernel( + f: *const ::core::ffi::c_void, + gridDim: dim3, + blockDimX: dim3, + kernelParams: *mut *mut ::core::ffi::c_void, + sharedMemBytes: ::core::ffi::c_uint, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Launches kernels on multiple devices where thread blocks can cooperate and + synchronize as they execute. + + @param [in] launchParamsList List of launch parameters, one per device. + @param [in] numDevices Size of the launchParamsList array. + @param [in] flags Flags to control launch behavior. + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue, + #hipErrorCooperativeLaunchTooLarge*/ + pub fn hipLaunchCooperativeKernelMultiDevice( + launchParamsList: *mut hipLaunchParams, + numDevices: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Launches kernels on multiple devices and guarantees all specified kernels are dispatched + on respective streams before enqueuing any other work on the specified streams from any other threads + + + @param [in] launchParamsList List of launch parameters, one per device. + @param [in] numDevices Size of the launchParamsList array. + @param [in] flags Flags to control launch behavior. 
+ + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue*/ + pub fn hipExtLaunchMultiKernelMultiDevice( + launchParamsList: *mut hipLaunchParams, + numDevices: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = "-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Occupancy Occupancy\n @{\n This section describes the occupancy functions of HIP runtime API.\n\n/\n/**\n @brief determine the grid and block sizes to achieves maximum occupancy for a kernel\n\n @param [out] gridSize minimum grid size for maximum potential occupancy\n @param [out] blockSize block size for maximum potential occupancy\n @param [in] f kernel function for which occupancy is calulated\n @param [in] dynSharedMemPerBlk dynamic shared memory usage (in bytes) intended for each block\n @param [in] blockSizeLimit the maximum block size for the kernel, use 0 for no limit\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorInvalidValue"] + pub fn hipModuleOccupancyMaxPotentialBlockSize( + gridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + f: hipFunction_t, + dynSharedMemPerBlk: usize, + blockSizeLimit: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief determine the grid and block sizes to achieves maximum occupancy for a kernel + + @param [out] gridSize minimum grid size for maximum potential occupancy + @param [out] blockSize block size for maximum potential occupancy + @param [in] f kernel function for which occupancy is calulated + @param [in] dynSharedMemPerBlk dynamic shared memory usage (in bytes) intended for each block + @param [in] blockSizeLimit the maximum block size for the kernel, use 0 for no limit + @param [in] flags Extra flags for occupancy calculation (only default supported) + + Please note, HIP does not support kernel launch with total work items defined in dimension with + size gridDim x blockDim >= 2^32. + + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipModuleOccupancyMaxPotentialBlockSizeWithFlags( + gridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + f: hipFunction_t, + dynSharedMemPerBlk: usize, + blockSizeLimit: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns occupancy for a device function. + + @param [out] numBlocks Returned occupancy + @param [in] f Kernel function (hipFunction) for which occupancy is calulated + @param [in] blockSize Block size the kernel is intended to be launched with + @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipModuleOccupancyMaxActiveBlocksPerMultiprocessor( + numBlocks: *mut ::core::ffi::c_int, + f: hipFunction_t, + blockSize: ::core::ffi::c_int, + dynSharedMemPerBlk: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns occupancy for a device function. 
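// Editorial sketch (not part of the generated bindings): picking a launch configuration with
// hipModuleOccupancyMaxPotentialBlockSize as declared above. A blockSizeLimit of 0 means
// "no limit"; the returned block size can then be used to derive the grid size.
unsafe fn pick_block_size(func: hipFunction_t, dyn_smem: usize) -> (i32, i32) {
    let (mut min_grid, mut block) = (0i32, 0i32);
    let _ = hipModuleOccupancyMaxPotentialBlockSize(&mut min_grid, &mut block, func, dyn_smem, 0);
    (min_grid, block)
}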
+ + @param [out] numBlocks Returned occupancy + @param [in] f Kernel function(hipFunction_t) for which occupancy is calulated + @param [in] blockSize Block size the kernel is intended to be launched with + @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block + @param [in] flags Extra flags for occupancy calculation (only default supported) + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + numBlocks: *mut ::core::ffi::c_int, + f: hipFunction_t, + blockSize: ::core::ffi::c_int, + dynSharedMemPerBlk: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns occupancy for a device function. + + @param [out] numBlocks Returned occupancy + @param [in] f Kernel function for which occupancy is calulated + @param [in] blockSize Block size the kernel is intended to be launched with + @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block + @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue*/ + pub fn hipOccupancyMaxActiveBlocksPerMultiprocessor( + numBlocks: *mut ::core::ffi::c_int, + f: *const ::core::ffi::c_void, + blockSize: ::core::ffi::c_int, + dynSharedMemPerBlk: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns occupancy for a device function. + + @param [out] numBlocks Returned occupancy + @param [in] f Kernel function for which occupancy is calulated + @param [in] blockSize Block size the kernel is intended to be launched with + @param [in] dynSharedMemPerBlk Dynamic shared memory usage (in bytes) intended for each block + @param [in] flags Extra flags for occupancy calculation (currently ignored) + @returns #hipSuccess, #hipErrorInvalidDeviceFunction, #hipErrorInvalidValue*/ + pub fn hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + numBlocks: *mut ::core::ffi::c_int, + f: *const ::core::ffi::c_void, + blockSize: ::core::ffi::c_int, + dynSharedMemPerBlk: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief determine the grid and block sizes to achieves maximum occupancy for a kernel + + @param [out] gridSize minimum grid size for maximum potential occupancy + @param [out] blockSize block size for maximum potential occupancy + @param [in] f kernel function for which occupancy is calulated + @param [in] dynSharedMemPerBlk dynamic shared memory usage (in bytes) intended for each block + @param [in] blockSizeLimit the maximum block size for the kernel, use 0 for no limit + + Please note, HIP does not support kernel launch with total work items defined in dimension with + size gridDim x blockDim >= 2^32. + + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipOccupancyMaxPotentialBlockSize( + gridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + f: *const ::core::ffi::c_void, + dynSharedMemPerBlk: usize, + blockSizeLimit: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Start recording of profiling information + When using this API, start the profiler with profiling disabled. (--startdisabled) + @returns #hipErrorNotSupported + @warning : hipProfilerStart API is deprecated, use roctracer/rocTX instead.*/ + pub fn hipProfilerStart() -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Stop recording of profiling information. + When using this API, start the profiler with profiling disabled. 
(--startdisabled) + @returns #hipErrorNotSupported + @warning hipProfilerStart API is deprecated, use roctracer/rocTX instead.*/ + pub fn hipProfilerStop() -> hipError_t; +} +extern "C" { + #[must_use] + #[doc = " @}\n/\n/**\n-------------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------------\n @defgroup Clang Launch API to support the triple-chevron syntax\n @{\n This section describes the API to support the triple-chevron syntax.\n/\n/**\n @brief Configure a kernel launch.\n\n @param [in] gridDim grid dimension specified as multiple of blockDim.\n @param [in] blockDim block dimensions specified in work-items\n @param [in] sharedMem Amount of dynamic shared memory to allocate for this kernel. The\n HIP-Clang compiler provides support for extern shared declarations.\n @param [in] stream Stream where the kernel should be dispatched. May be 0, in which case the\n default stream is used with associated synchronization rules.\n\n Please note, HIP does not support kernel launch with total work items defined in dimension with\n size gridDim x blockDim >= 2^32.\n\n @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue\n"] + pub fn hipConfigureCall( + gridDim: dim3, + blockDim: dim3, + sharedMem: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set a kernel argument. + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue + + @param [in] arg Pointer the argument in host memory. + @param [in] size Size of the argument. + @param [in] offset Offset of the argument on the argument stack. +*/ + pub fn hipSetupArgument( + arg: *const ::core::ffi::c_void, + size: usize, + offset: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Launch a kernel. + + @param [in] func Kernel to launch. + + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue +*/ + pub fn hipLaunchByPtr(func: *const ::core::ffi::c_void) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief C compliant kernel launch API + + @param [in] function_address - kernel stub function pointer. + @param [in] numBlocks - number of blocks + @param [in] dimBlocks - dimension of a block + @param [in] args - kernel arguments + @param [in] sharedMemBytes - Amount of dynamic shared memory to allocate for this kernel. The + HIP-Clang compiler provides support for extern shared declarations. + @param [in] stream - Stream where the kernel should be dispatched. May be 0, in which case th + default stream is used with associated synchronization rules. + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipLaunchKernel( + function_address: *const ::core::ffi::c_void, + numBlocks: dim3, + dimBlocks: dim3, + args: *mut *mut ::core::ffi::c_void, + sharedMemBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enqueues a host function call in a stream. + + @param [in] stream - The stream to enqueue work in. + @param [in] fn - The function to call once enqueued preceeding operations are complete. + @param [in] userData - User-specified data to be passed to the function. + + @returns #hipSuccess, #hipErrorInvalidResourceHandle, #hipErrorInvalidValue, + #hipErrorNotSupported + + The host function to call in this API will be executed after the preceding operations in + the stream are complete. 
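hipLaunchKernel mirrors the CUDA runtime launch API: its first argument is the host-side stub that HIP-Clang registers for a __global__ function, so it cannot be produced from pure Rust. The sketch below only shows how the FFI call is shaped; `stub` is assumed to come from linked HIP/C++ code and `args` holds one pointer per kernel parameter.

    use core::ffi::c_void;

    /// Launch the kernel identified by `stub` (a hypothetical host stub registered
    /// by HIP-Clang) with no dynamic shared memory.
    unsafe fn launch(
        stub: *const c_void,
        grid: dim3,
        block: dim3,
        args: &mut [*mut c_void],
        stream: hipStream_t, // null selects the default stream
    ) -> hipError_t {
        hipLaunchKernel(stub, grid, block, args.as_mut_ptr(), 0, stream)
    }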
The function is a blocking operation that blocks operations in the + stream that follow it, until the function is returned. + Event synchronization and internal callback functions make sure enqueued operations will + execute in order, in the stream. + + The host function must not make any HIP API calls. The host function is non-reentrant. It must + not perform sychronization with any operation that may depend on other processing execution + but is not enqueued to run earlier in the stream. + + Host functions that are enqueued respectively in different non-blocking streams can run concurrently. + + @warning This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipLaunchHostFunc( + stream: hipStream_t, + fn_: hipHostFn_t, + userData: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** Copies memory for 2D arrays. + + @param pCopy - Parameters for the memory copy + + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipDrvMemcpy2DUnaligned(pCopy: *const hip_Memcpy2D) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Launches kernel from the pointer address, with arguments and shared memory on stream. + + @param [in] function_address pointer to the Kernel to launch. + @param [in] numBlocks number of blocks. + @param [in] dimBlocks dimension of a block. + @param [in] args pointer to kernel arguments. + @param [in] sharedMemBytes Amount of dynamic shared memory to allocate for this kernel. + HIP-Clang compiler provides support for extern shared declarations. + @param [in] stream Stream where the kernel should be dispatched. + May be 0, in which case the default stream is used with associated synchronization rules. + @param [in] startEvent If non-null, specified event will be updated to track the start time of + the kernel launch. The event must be created before calling this API. + @param [in] stopEvent If non-null, specified event will be updated to track the stop time of + the kernel launch. The event must be created before calling this API. + @param [in] flags The value of hipExtAnyOrderLaunch, signifies if kernel can be + launched in any order. + @returns #hipSuccess, #hipErrorNotInitialized, #hipErrorInvalidValue. +*/ + pub fn hipExtLaunchKernel( + function_address: *const ::core::ffi::c_void, + numBlocks: dim3, + dimBlocks: dim3, + args: *mut *mut ::core::ffi::c_void, + sharedMemBytes: usize, + stream: hipStream_t, + startEvent: hipEvent_t, + stopEvent: hipEvent_t, + flags: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a texture object. + + @param [out] pTexObject pointer to the texture object to create + @param [in] pResDesc pointer to resource descriptor + @param [in] pTexDesc pointer to texture descriptor + @param [in] pResViewDesc pointer to resource view descriptor + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported, #hipErrorOutOfMemory + + @note 3D liner filter isn't supported on GFX90A boards, on which the API @p hipCreateTextureObject will + return hipErrorNotSupported. +*/ + pub fn hipCreateTextureObject( + pTexObject: *mut hipTextureObject_t, + pResDesc: *const hipResourceDesc, + pTexDesc: *const hipTextureDesc, + pResViewDesc: *const hipResourceViewDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys a texture object. 
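hipLaunchHostFunc queues a host callback behind the work already submitted to a stream. A sketch under the assumption that hipHostFn_t is generated, as bindgen usually does, as an Option wrapper around the raw extern "C" callback type; per the documentation above, the callback must not call back into HIP.

    use core::ffi::c_void;

    // Runs on a runtime-owned thread once the preceding stream work completes.
    unsafe extern "C" fn notify_done(user_data: *mut c_void) {
        let flag = user_data as *mut bool;
        if !flag.is_null() {
            *flag = true;
        }
    }

    /// Enqueue `notify_done` behind everything already in `stream`.
    /// `done` must stay valid until the callback has executed.
    unsafe fn enqueue_notification(stream: hipStream_t, done: *mut bool) -> hipError_t {
        hipLaunchHostFunc(stream, Some(notify_done), done as *mut c_void)
    }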
+ + @param [in] textureObject texture object to destroy + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipDestroyTextureObject(textureObject: hipTextureObject_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the channel descriptor in an array. + + @param [in] desc pointer to channel format descriptor + @param [out] array memory array on the device + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetChannelDesc( + desc: *mut hipChannelFormatDesc, + array: hipArray_const_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets resource descriptor for the texture object. + + @param [out] pResDesc pointer to resource descriptor + @param [in] textureObject texture object + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetTextureObjectResourceDesc( + pResDesc: *mut hipResourceDesc, + textureObject: hipTextureObject_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets resource view descriptor for the texture object. + + @param [out] pResViewDesc pointer to resource view descriptor + @param [in] textureObject texture object + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetTextureObjectResourceViewDesc( + pResViewDesc: *mut hipResourceViewDesc, + textureObject: hipTextureObject_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets texture descriptor for the texture object. + + @param [out] pTexDesc pointer to texture descriptor + @param [in] textureObject texture object + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetTextureObjectTextureDesc( + pTexDesc: *mut hipTextureDesc, + textureObject: hipTextureObject_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a texture object. + + @param [out] pTexObject pointer to texture object to create + @param [in] pResDesc pointer to resource descriptor + @param [in] pTexDesc pointer to texture descriptor + @param [in] pResViewDesc pointer to resource view descriptor + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipTexObjectCreate( + pTexObject: *mut hipTextureObject_t, + pResDesc: *const HIP_RESOURCE_DESC, + pTexDesc: *const HIP_TEXTURE_DESC, + pResViewDesc: *const HIP_RESOURCE_VIEW_DESC, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys a texture object. + + @param [in] texObject texture object to destroy + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipTexObjectDestroy(texObject: hipTextureObject_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets resource descriptor of a texture object. + + @param [out] pResDesc pointer to resource descriptor + @param [in] texObject texture object + + @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue +*/ + pub fn hipTexObjectGetResourceDesc( + pResDesc: *mut HIP_RESOURCE_DESC, + texObject: hipTextureObject_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets resource view descriptor of a texture object. + + @param [out] pResViewDesc pointer to resource view descriptor + @param [in] texObject texture object + + @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue +*/ + pub fn hipTexObjectGetResourceViewDesc( + pResViewDesc: *mut HIP_RESOURCE_VIEW_DESC, + texObject: hipTextureObject_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets texture descriptor of a texture object. 
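The texture-object getters fill caller-provided descriptor structs. A sketch that reads back the resource descriptor of an existing texture object; `tex_obj` is assumed to have been created elsewhere (for example with hipCreateTextureObject above), and zero-initialising the out struct via MaybeUninit is an assumption that holds for these plain C structs.

    use core::mem::MaybeUninit;

    /// Fetch the resource descriptor backing `tex_obj`. The descriptor is only
    /// meaningful when the returned status is hipSuccess.
    unsafe fn resource_desc_of(tex_obj: hipTextureObject_t) -> (hipError_t, hipResourceDesc) {
        let mut desc = MaybeUninit::<hipResourceDesc>::zeroed();
        let status = hipGetTextureObjectResourceDesc(desc.as_mut_ptr(), tex_obj);
        (status, desc.assume_init())
    }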
+ + @param [out] pTexDesc pointer to texture descriptor + @param [in] texObject texture object + + @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue +*/ + pub fn hipTexObjectGetTextureDesc( + pTexDesc: *mut HIP_TEXTURE_DESC, + texObject: hipTextureObject_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Allocate a mipmapped array on the device. + + @param[out] mipmappedArray - Pointer to allocated mipmapped array in device memory + @param[in] desc - Requested channel format + @param[in] extent - Requested allocation size (width field in elements) + @param[in] numLevels - Number of mipmap levels to allocate + @param[in] flags - Flags for extensions + + @return #hipSuccess, #hipErrorInvalidValue, #hipErrorMemoryAllocation + + @note This API is implemented on Windows, under development on Linux. +*/ + pub fn hipMallocMipmappedArray( + mipmappedArray: *mut hipMipmappedArray_t, + desc: *const hipChannelFormatDesc, + extent: hipExtent, + numLevels: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Frees a mipmapped array on the device. + + @param[in] mipmappedArray - Pointer to mipmapped array to free + + @return #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Windows, under development on Linux. +*/ + pub fn hipFreeMipmappedArray(mipmappedArray: hipMipmappedArray_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets a mipmap level of a HIP mipmapped array. + + @param[out] levelArray - Returned mipmap level HIP array + @param[in] mipmappedArray - HIP mipmapped array + @param[in] level - Mipmap level + + @return #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Windows, under development on Linux. +*/ + pub fn hipGetMipmappedArrayLevel( + levelArray: *mut hipArray_t, + mipmappedArray: hipMipmappedArray_const_t, + level: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create a mipmapped array. + + @param [out] pHandle pointer to mipmapped array + @param [in] pMipmappedArrayDesc mipmapped array descriptor + @param [in] numMipmapLevels mipmap level + + @returns #hipSuccess, #hipErrorNotSupported, #hipErrorInvalidValue + + @note This API is implemented on Windows, under development on Linux.*/ + pub fn hipMipmappedArrayCreate( + pHandle: *mut hipMipmappedArray_t, + pMipmappedArrayDesc: *mut HIP_ARRAY3D_DESCRIPTOR, + numMipmapLevels: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroy a mipmapped array. + + @param [out] hMipmappedArray pointer to mipmapped array to destroy + + @returns #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Windows, under development on Linux. +*/ + pub fn hipMipmappedArrayDestroy(hMipmappedArray: hipMipmappedArray_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get a mipmapped array on a mipmapped level. + + @param [in] pLevelArray Pointer of array + @param [out] hMipMappedArray Pointer of mipmapped array on the requested mipmap level + @param [out] level Mipmap level + + @returns #hipSuccess, #hipErrorInvalidValue + + @note This API is implemented on Windows, under development on Linux. +*/ + pub fn hipMipmappedArrayGetLevel( + pLevelArray: *mut hipArray_t, + hMipMappedArray: hipMipmappedArray_t, + level: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Binds a mipmapped array to a texture. 
+ + @param [in] tex pointer to the texture reference to bind + @param [in] mipmappedArray memory mipmapped array on the device + @param [in] desc opointer to the channel format + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipBindTextureToMipmappedArray( + tex: *const textureReference, + mipmappedArray: hipMipmappedArray_const_t, + desc: *const hipChannelFormatDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the texture reference related with the symbol. + + @param [out] texref texture reference + @param [in] symbol pointer to the symbol related with the texture for the reference + + @returns #hipSuccess, #hipErrorInvalidValue + @warning This API is deprecated. +*/ + pub fn hipGetTextureReference( + texref: *mut *const textureReference, + symbol: *const ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the border color used by a texture reference. + + @param [out] pBorderColor Returned Type and Value of RGBA color. + @param [in] texRef Texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue + @warning This API is deprecated. +*/ + pub fn hipTexRefGetBorderColor( + pBorderColor: *mut f32, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the array bound to a texture reference. + + + @param [in] pArray Returned array. + @param [in] texRef texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue + @warning This API is deprecated. +*/ + pub fn hipTexRefGetArray( + pArray: *mut hipArray_t, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets address mode for a texture reference. + + @param [in] texRef texture reference. + @param [in] dim Dimension of the texture. + @param [in] am Value of the texture address mode. + + @returns #hipSuccess, #hipErrorInvalidValue + @warning This API is deprecated. +*/ + pub fn hipTexRefSetAddressMode( + texRef: *mut textureReference, + dim: ::core::ffi::c_int, + am: hipTextureAddressMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Binds an array as a texture reference. + + @param [in] tex Pointer texture reference. + @param [in] array Array to bind. + @param [in] flags Flags should be set as HIP_TRSA_OVERRIDE_FORMAT, as a valid value. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetArray( + tex: *mut textureReference, + array: hipArray_const_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set filter mode for a texture reference. + + @param [in] texRef Pointer texture reference. + @param [in] fm Value of texture filter mode. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetFilterMode( + texRef: *mut textureReference, + fm: hipTextureFilterMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set flags for a texture reference. + + @param [in] texRef Pointer texture reference. + @param [in] Flags Value of flags. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetFlags( + texRef: *mut textureReference, + Flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set format for a texture reference. + + @param [in] texRef Pointer texture reference. + @param [in] fmt Value of format. + @param [in] NumPackedComponents Number of components per array. 
+ + @returns #hipSuccess, #hipErrorInvalidValue + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetFormat( + texRef: *mut textureReference, + fmt: hipArray_Format, + NumPackedComponents: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Binds a memory area to a texture. + + @param [in] offset Offset in bytes. + @param [in] tex Texture to bind. + @param [in] devPtr Pointer of memory on the device. + @param [in] desc Pointer of channel format descriptor. + @param [in] size Size of memory in bites. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipBindTexture( + offset: *mut usize, + tex: *const textureReference, + devPtr: *const ::core::ffi::c_void, + desc: *const hipChannelFormatDesc, + size: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Binds a 2D memory area to a texture. + + @param [in] offset Offset in bytes. + @param [in] tex Texture to bind. + @param [in] devPtr Pointer of 2D memory area on the device. + @param [in] desc Pointer of channel format descriptor. + @param [in] width Width in texel units. + @param [in] height Height in texel units. + @param [in] pitch Pitch in bytes. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipBindTexture2D( + offset: *mut usize, + tex: *const textureReference, + devPtr: *const ::core::ffi::c_void, + desc: *const hipChannelFormatDesc, + width: usize, + height: usize, + pitch: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Binds a memory area to a texture. + + @param [in] tex Pointer of texture reference. + @param [in] array Array to bind. + @param [in] desc Pointer of channel format descriptor. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipBindTextureToArray( + tex: *const textureReference, + array: hipArray_const_t, + desc: *const hipChannelFormatDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the offset of the alignment in a texture. + + @param [in] offset Offset in bytes. + @param [in] texref Pointer of texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipGetTextureAlignmentOffset( + offset: *mut usize, + texref: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Unbinds a texture. + + @param [in] tex Texture to unbind. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipUnbindTexture(tex: *const textureReference) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the address for a texture reference. + + @param [out] dev_ptr Pointer of device address. + @param [in] texRef Pointer of texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetAddress( + dev_ptr: *mut hipDeviceptr_t, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the address mode for a texture reference. + + @param [out] pam Pointer of address mode. + @param [in] texRef Pointer of texture reference. + @param [in] dim Dimension. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. 
+*/ + pub fn hipTexRefGetAddressMode( + pam: *mut hipTextureAddressMode, + texRef: *const textureReference, + dim: ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets filter mode for a texture reference. + + @param [out] pfm Pointer of filter mode. + @param [in] texRef Pointer of texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetFilterMode( + pfm: *mut hipTextureFilterMode, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets flags for a texture reference. + + @param [out] pFlags Pointer of flags. + @param [in] texRef Pointer of texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetFlags( + pFlags: *mut ::core::ffi::c_uint, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets texture format for a texture reference. + + @param [out] pFormat Pointer of the format. + @param [out] pNumChannels Pointer of number of channels. + @param [in] texRef Pointer of texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetFormat( + pFormat: *mut hipArray_Format, + pNumChannels: *mut ::core::ffi::c_int, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the maximum anisotropy for a texture reference. + + @param [out] pmaxAnsio Pointer of the maximum anisotropy. + @param [in] texRef Pointer of texture reference. + + @returns #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetMaxAnisotropy( + pmaxAnsio: *mut ::core::ffi::c_int, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the mipmap filter mode for a texture reference. + + @param [out] pfm Pointer of the mipmap filter mode. + @param [in] texRef Pointer of texture reference. + + @returns #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetMipmapFilterMode( + pfm: *mut hipTextureFilterMode, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the mipmap level bias for a texture reference. + + @param [out] pbias Pointer of the mipmap level bias. + @param [in] texRef Pointer of texture reference. + + @returns #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetMipmapLevelBias( + pbias: *mut f32, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the minimum and maximum mipmap level clamps for a texture reference. + + @param [out] pminMipmapLevelClamp Pointer of the minimum mipmap level clamp. + @param [out] pmaxMipmapLevelClamp Pointer of the maximum mipmap level clamp. + @param [in] texRef Pointer of texture reference. + + @returns #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetMipmapLevelClamp( + pminMipmapLevelClamp: *mut f32, + pmaxMipmapLevelClamp: *mut f32, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets the mipmapped array bound to a texture reference. + + @param [out] pArray Pointer of the mipmapped array. 
+ @param [in] texRef Pointer of texture reference. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefGetMipMappedArray( + pArray: *mut hipMipmappedArray_t, + texRef: *const textureReference, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets an bound address for a texture reference. + + @param [out] ByteOffset Pointer of the offset in bytes. + @param [in] texRef Pointer of texture reference. + @param [in] dptr Pointer of device address to bind. + @param [in] bytes Size in bytes. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetAddress( + ByteOffset: *mut usize, + texRef: *mut textureReference, + dptr: hipDeviceptr_t, + bytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set a bind an address as a 2D texture reference. + + @param [in] texRef Pointer of texture reference. + @param [in] desc Pointer of array descriptor. + @param [in] dptr Pointer of device address to bind. + @param [in] Pitch Pitch in bytes. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetAddress2D( + texRef: *mut textureReference, + desc: *const HIP_ARRAY_DESCRIPTOR, + dptr: hipDeviceptr_t, + Pitch: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the maximum anisotropy for a texture reference. + + @param [in] texRef Pointer of texture reference. + @param [out] maxAniso Value of the maximum anisotropy. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetMaxAnisotropy( + texRef: *mut textureReference, + maxAniso: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets border color for a texture reference. + + @param [in] texRef Pointer of texture reference. + @param [in] pBorderColor Pointer of border color. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetBorderColor( + texRef: *mut textureReference, + pBorderColor: *mut f32, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets mipmap filter mode for a texture reference. + + @param [in] texRef Pointer of texture reference. + @param [in] fm Value of filter mode. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetMipmapFilterMode( + texRef: *mut textureReference, + fm: hipTextureFilterMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets mipmap level bias for a texture reference. + + @param [in] texRef Pointer of texture reference. + @param [in] bias Value of mipmap bias. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetMipmapLevelBias( + texRef: *mut textureReference, + bias: f32, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets mipmap level clamp for a texture reference. + + @param [in] texRef Pointer of texture reference. + @param [in] minMipMapLevelClamp Value of minimum mipmap level clamp. + @param [in] maxMipMapLevelClamp Value of maximum mipmap level clamp. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + + @warning This API is deprecated. 
+*/ + pub fn hipTexRefSetMipmapLevelClamp( + texRef: *mut textureReference, + minMipMapLevelClamp: f32, + maxMipMapLevelClamp: f32, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Binds mipmapped array to a texture reference. + + @param [in] texRef Pointer of texture reference to bind. + @param [in] mipmappedArray Pointer of mipmapped array to bind. + @param [in] Flags Flags should be set as HIP_TRSA_OVERRIDE_FORMAT, as a valid value. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning This API is deprecated. +*/ + pub fn hipTexRefSetMipmappedArray( + texRef: *mut textureReference, + mipmappedArray: *mut hipMipmappedArray, + Flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[doc = " @defgroup Callback Callback Activity APIs\n @{\n This section describes the callback/Activity of HIP runtime API.\n/\n/**\n @brief Returns HIP API name by ID.\n\n @param [in] id ID of HIP API\n\n @returns #hipSuccess, #hipErrorInvalidValue\n"] + pub fn hipApiName(id: u32) -> *const ::core::ffi::c_char; +} +extern "C" { + /** @brief Returns kernel name reference by function name. + + @param [in] f Name of function + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipKernelNameRef(f: hipFunction_t) -> *const ::core::ffi::c_char; +} +extern "C" { + /** @brief Retrives kernel for a given host pointer, unless stated otherwise. + + @param [in] hostFunction Pointer of host function. + @param [in] stream Stream the kernel is executed on. + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipKernelNameRefByPtr( + hostFunction: *const ::core::ffi::c_void, + stream: hipStream_t, + ) -> *const ::core::ffi::c_char; +} +extern "C" { + /** @brief Returns device ID on the stream. + + @param [in] stream Stream of device executed on. + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGetStreamDeviceId(stream: hipStream_t) -> ::core::ffi::c_int; +} +extern "C" { + #[must_use] + /** @brief Begins graph capture on a stream. + + @param [in] stream - Stream to initiate capture. + @param [in] mode - Controls the interaction of this capture sequence with other API calls that + are not safe. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipStreamBeginCapture( + stream: hipStream_t, + mode: hipStreamCaptureMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Begins graph capture on a stream to an existing graph. + + @param [in] stream - Stream to initiate capture. + @param [in] graph - Graph to capture into. + @param [in] dependencies - Dependencies of the first node captured in the stream. Can be NULL if + numDependencies is 0. + @param [in] dependencyData - Optional array of data associated with each dependency. + @param [in] numDependencies - Number of dependencies. + @param [in] mode - Controls the interaction of this capture sequence with other API calls that +are not safe. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : param "const hipGraphEdgeData* dependencyData" is currently not supported and has to +passed as nullptr. 
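Unlike most of the API, hipApiName and the other callback/activity helpers return borrowed C strings rather than error codes. A sketch that copies the name for an API ID into an owned Rust string, guarding against a null or non-UTF-8 result:

    use std::ffi::CStr;

    /// Return the HIP API name for `id`, or None if the runtime gives back a
    /// null or non-UTF-8 pointer. The pointer is owned by the runtime, so the
    /// string is copied before returning.
    unsafe fn api_name(id: u32) -> Option<String> {
        let ptr = hipApiName(id);
        if ptr.is_null() {
            return None;
        }
        CStr::from_ptr(ptr).to_str().ok().map(str::to_owned)
    }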
This API is marked as beta, meaning, while this is feature complete, it is still +open to changes and may have outstanding issues.*/ + pub fn hipStreamBeginCaptureToGraph( + stream: hipStream_t, + graph: hipGraph_t, + dependencies: *const hipGraphNode_t, + dependencyData: *const hipGraphEdgeData, + numDependencies: usize, + mode: hipStreamCaptureMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Ends capture on a stream, returning the captured graph. + + @param [in] stream - Stream to end capture. + @param [out] pGraph - returns the graph captured. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipStreamEndCapture( + stream: hipStream_t, + pGraph: *mut hipGraph_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get capture status of a stream. + + @param [in] stream - Stream under capture. + @param [out] pCaptureStatus - returns current status of the capture. + @param [out] pId - unique ID of the capture. + + @returns #hipSuccess, #hipErrorStreamCaptureImplicit + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipStreamGetCaptureInfo( + stream: hipStream_t, + pCaptureStatus: *mut hipStreamCaptureStatus, + pId: *mut ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get stream's capture state + + @param [in] stream - Stream under capture. + @param [out] captureStatus_out - returns current status of the capture. + @param [out] id_out - unique ID of the capture. + @param [in] graph_out - returns the graph being captured into. + @param [out] dependencies_out - returns pointer to an array of nodes. + @param [out] numDependencies_out - returns size of the array returned in dependencies_out. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorStreamCaptureImplicit + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipStreamGetCaptureInfo_v2( + stream: hipStream_t, + captureStatus_out: *mut hipStreamCaptureStatus, + id_out: *mut ::core::ffi::c_ulonglong, + graph_out: *mut hipGraph_t, + dependencies_out: *mut *const hipGraphNode_t, + numDependencies_out: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get stream's capture state + + @param [in] stream - Stream under capture. + @param [out] pCaptureStatus - returns current status of the capture. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorStreamCaptureImplicit + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipStreamIsCapturing( + stream: hipStream_t, + pCaptureStatus: *mut hipStreamCaptureStatus, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Update the set of dependencies in a capturing stream + + @param [in] stream Stream under capture. + @param [in] dependencies pointer to an array of nodes to Add/Replace. + @param [in] numDependencies size of the array in dependencies. + @param [in] flags Flag how to update dependency set. 
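Capture state is likewise reported through out parameters. A sketch that polls whether a stream is currently being captured; `stream` is a hypothetical handle, and the returned hipStreamCaptureStatus is compared by the caller against the constants generated earlier in this file.

    use core::mem::MaybeUninit;

    /// Query the capture status of `stream`. The status is only valid when the
    /// accompanying hipError_t is hipSuccess.
    unsafe fn capture_status(stream: hipStream_t) -> (hipError_t, hipStreamCaptureStatus) {
        let mut status = MaybeUninit::<hipStreamCaptureStatus>::zeroed();
        let err = hipStreamIsCapturing(stream, status.as_mut_ptr());
        (err, status.assume_init())
    }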
Should be one of value in enum + #hipStreamUpdateCaptureDependenciesFlags + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorIllegalState + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipStreamUpdateCaptureDependencies( + stream: hipStream_t, + dependencies: *mut hipGraphNode_t, + numDependencies: usize, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Swaps the stream capture mode of a thread. + + @param [in] mode - Pointer to mode value to swap with the current mode + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipThreadExchangeStreamCaptureMode( + mode: *mut hipStreamCaptureMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a graph + + @param [out] pGraph - pointer to graph to create. + @param [in] flags - flags for graph creation, must be 0. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorMemoryAllocation + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphCreate( + pGraph: *mut hipGraph_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys a graph + + @param [in] graph - instance of graph to destroy. + + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphDestroy(graph: hipGraph_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Adds dependency edges to a graph. + + @param [in] graph - instance of the graph to add dependencies. + @param [in] from - pointer to the graph nodes with dependenties to add from. + @param [in] to - pointer to the graph nodes to add dependenties to. + @param [in] numDependencies - the number of dependencies to add. + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphAddDependencies( + graph: hipGraph_t, + from: *const hipGraphNode_t, + to: *const hipGraphNode_t, + numDependencies: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Removes dependency edges from a graph. + + @param [in] graph - instance of the graph to remove dependencies. + @param [in] from - Array of nodes that provide the dependencies. + @param [in] to - Array of dependent nodes. + @param [in] numDependencies - the number of dependencies to remove. + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphRemoveDependencies( + graph: hipGraph_t, + from: *const hipGraphNode_t, + to: *const hipGraphNode_t, + numDependencies: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a graph's dependency edges. + + @param [in] graph - instance of the graph to get the edges from. + @param [out] from - pointer to the graph nodes to return edge endpoints. + @param [out] to - pointer to the graph nodes to return edge endpoints. 
+ @param [out] numEdges - returns number of edges. + @returns #hipSuccess, #hipErrorInvalidValue + + from and to may both be NULL, in which case this function only returns the number of edges in + numEdges. Otherwise, numEdges entries will be filled in. If numEdges is higher than the actual + number of edges, the remaining entries in from and to will be set to NULL, and the number of + edges actually returned will be written to numEdges + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphGetEdges( + graph: hipGraph_t, + from: *mut hipGraphNode_t, + to: *mut hipGraphNode_t, + numEdges: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns graph nodes. + + @param [in] graph - instance of graph to get the nodes. + @param [out] nodes - pointer to return the graph nodes. + @param [out] numNodes - returns number of graph nodes. + @returns #hipSuccess, #hipErrorInvalidValue + + nodes may be NULL, in which case this function will return the number of nodes in numNodes. + Otherwise, numNodes entries will be filled in. If numNodes is higher than the actual number of + nodes, the remaining entries in nodes will be set to NULL, and the number of nodes actually + obtained will be returned in numNodes. + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphGetNodes( + graph: hipGraph_t, + nodes: *mut hipGraphNode_t, + numNodes: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns graph's root nodes. + + @param [in] graph - instance of the graph to get the nodes. + @param [out] pRootNodes - pointer to return the graph's root nodes. + @param [out] pNumRootNodes - returns the number of graph's root nodes. + @returns #hipSuccess, #hipErrorInvalidValue + + pRootNodes may be NULL, in which case this function will return the number of root nodes in + pNumRootNodes. Otherwise, pNumRootNodes entries will be filled in. If pNumRootNodes is higher + than the actual number of root nodes, the remaining entries in pRootNodes will be set to NULL, + and the number of nodes actually obtained will be returned in pNumRootNodes. + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphGetRootNodes( + graph: hipGraph_t, + pRootNodes: *mut hipGraphNode_t, + pNumRootNodes: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a node's dependencies. + + @param [in] node - graph node to get the dependencies from. + @param [out] pDependencies - pointer to to return the dependencies. + @param [out] pNumDependencies - returns the number of graph node dependencies. + @returns #hipSuccess, #hipErrorInvalidValue + + pDependencies may be NULL, in which case this function will return the number of dependencies in + pNumDependencies. Otherwise, pNumDependencies entries will be filled in. If pNumDependencies is + higher than the actual number of dependencies, the remaining entries in pDependencies will be set + to NULL, and the number of nodes actually obtained will be returned in pNumDependencies. + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
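hipGraphGetNodes documents the usual two-call convention: pass NULL first to learn the count, then call again with a buffer of that size. A sketch of the first half of that pattern, counting the nodes of a graph created elsewhere:

    use core::ptr;

    /// Return the number of nodes currently in `graph`, using the
    /// "NULL buffer returns only the count" behaviour described above.
    unsafe fn node_count(graph: hipGraph_t) -> (hipError_t, usize) {
        let mut count: usize = 0;
        let err = hipGraphGetNodes(graph, ptr::null_mut(), &mut count);
        (err, count)
    }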
+*/ + pub fn hipGraphNodeGetDependencies( + node: hipGraphNode_t, + pDependencies: *mut hipGraphNode_t, + pNumDependencies: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a node's dependent nodes. + + @param [in] node - graph node to get the Dependent nodes from. + @param [out] pDependentNodes - pointer to return the graph dependent nodes. + @param [out] pNumDependentNodes - returns the number of graph node dependent nodes. + @returns #hipSuccess, #hipErrorInvalidValue + + DependentNodes may be NULL, in which case this function will return the number of dependent nodes + in pNumDependentNodes. Otherwise, pNumDependentNodes entries will be filled in. If + pNumDependentNodes is higher than the actual number of dependent nodes, the remaining entries in + pDependentNodes will be set to NULL, and the number of nodes actually obtained will be returned + in pNumDependentNodes. + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphNodeGetDependentNodes( + node: hipGraphNode_t, + pDependentNodes: *mut hipGraphNode_t, + pNumDependentNodes: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a node's type. + + @param [in] node - instance of the graph to add dependencies. + @param [out] pType - pointer to the return the type + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphNodeGetType( + node: hipGraphNode_t, + pType: *mut hipGraphNodeType, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Remove a node from the graph. + + @param [in] node - graph node to remove + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphDestroyNode(node: hipGraphNode_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Clones a graph. + + @param [out] pGraphClone - Returns newly created cloned graph. + @param [in] originalGraph - original graph to clone from. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorMemoryAllocation + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphClone( + pGraphClone: *mut hipGraph_t, + originalGraph: hipGraph_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Finds a cloned version of a node. + + @param [out] pNode - Returns the cloned node. + @param [in] originalNode - original node handle. + @param [in] clonedGraph - Cloned graph to query. + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphNodeFindInClone( + pNode: *mut hipGraphNode_t, + originalNode: hipGraphNode_t, + clonedGraph: hipGraph_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates an executable graph from a graph + + @param [out] pGraphExec - pointer to instantiated executable graph that is created. + @param [in] graph - instance of graph to instantiate. + @param [out] pErrorNode - pointer to error node in case error occured in graph instantiation, + it could modify the correponding node. 
+ @param [out] pLogBuffer - pointer to log buffer. + @param [out] bufferSize - the size of log buffer. + + @returns #hipSuccess, #hipErrorOutOfMemory + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. +*/ + pub fn hipGraphInstantiate( + pGraphExec: *mut hipGraphExec_t, + graph: hipGraph_t, + pErrorNode: *mut hipGraphNode_t, + pLogBuffer: *mut ::core::ffi::c_char, + bufferSize: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates an executable graph from a graph. + + @param [out] pGraphExec - pointer to instantiated executable graph that is created. + @param [in] graph - instance of graph to instantiate. + @param [in] flags - Flags to control instantiation. + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.It does not support + any of flag and is behaving as hipGraphInstantiate.*/ + pub fn hipGraphInstantiateWithFlags( + pGraphExec: *mut hipGraphExec_t, + graph: hipGraph_t, + flags: ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates an executable graph from a graph. + + @param [out] pGraphExec - pointer to instantiated executable graph that is created. + @param [in] graph - instance of graph to instantiate. + @param [in] instantiateParams - Graph Instantiate Params + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphInstantiateWithParams( + pGraphExec: *mut hipGraphExec_t, + graph: hipGraph_t, + instantiateParams: *mut hipGraphInstantiateParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief launches an executable graph in a stream + + @param [in] graphExec - instance of executable graph to launch. + @param [in] stream - instance of stream in which to launch executable graph. + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphLaunch(graphExec: hipGraphExec_t, stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief uploads an executable graph in a stream + + @param [in] graphExec - instance of executable graph to launch. + @param [in] stream - instance of stream in which to launch executable graph. + @returns #hipSuccess, #hipErrorInvalidValue + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphUpload(graphExec: hipGraphExec_t, stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a kernel execution node and adds it to a graph. + + @param [out] pGraphNode - pointer to graph node to create. + @param [in] graph - instance of graph to add the created node. + @param [in] pDependencies - pointer to the dependencies on the kernel execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] nodeParams - pointer to the parameters for the node. + @returns #hipSuccess, #hipErrorInvalidValue. 
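Turning a graph into something launchable is a two-step affair: hipGraphInstantiate produces a hipGraphExec_t, which hipGraphLaunch then submits to a stream. The sketch below leaves the error-node and log-buffer out parameters NULL, which is assumed to be accepted, and hands the executable graph back to the caller, who launches it and eventually releases it with hipGraphExecDestroy.

    use core::mem::MaybeUninit;
    use core::ptr;

    /// Instantiate `graph`. On hipSuccess the caller may run the result with
    /// hipGraphLaunch(exec, stream) and must later free it with
    /// hipGraphExecDestroy(exec).
    unsafe fn instantiate(graph: hipGraph_t) -> (hipError_t, hipGraphExec_t) {
        let mut exec = MaybeUninit::<hipGraphExec_t>::zeroed();
        let err = hipGraphInstantiate(
            exec.as_mut_ptr(),
            graph,
            ptr::null_mut(), // pErrorNode: not requested here (assumed accepted)
            ptr::null_mut(), // pLogBuffer: no instantiation log
            0,               // bufferSize
        );
        (err, exec.assume_init())
    }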
+ @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + nodeParams: *mut hipGraphNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroys an executable graph + + @param [in] graphExec - instance of executable graph to destry. + + @returns #hipSuccess. + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecDestroy(graphExec: hipGraphExec_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Check whether an executable graph can be updated with a graph and perform the update if * + possible. + + @param [in] hGraphExec - instance of executable graph to update. + @param [in] hGraph - graph that contains the updated parameters. + @param [in] hErrorNode_out - node which caused the permissibility check to forbid the update. + @param [in] updateResult_out - Whether the graph update was permitted. + @returns #hipSuccess, #hipErrorGraphExecUpdateFailure + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecUpdate( + hGraphExec: hipGraphExec_t, + hGraph: hipGraph_t, + hErrorNode_out: *mut hipGraphNode_t, + updateResult_out: *mut hipGraphExecUpdateResult, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a kernel execution node and adds it to a graph. + + @param [out] pGraphNode - pointer to graph node to create. + @param [in] graph - instance of graph to add the created node. + @param [in] pDependencies - pointer to the dependencies on the kernel execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] pNodeParams - pointer to the parameters to the kernel execution node on the GPU. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorInvalidDeviceFunction + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddKernelNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + pNodeParams: *const hipKernelNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets kernel node's parameters. + + @param [in] node - instance of the node to get parameters from. + @param [out] pNodeParams - pointer to the parameters + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphKernelNodeGetParams( + node: hipGraphNode_t, + pNodeParams: *mut hipKernelNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a kernel node's parameters. + + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - const pointer to the parameters. 
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphKernelNodeSetParams( + node: hipGraphNode_t, + pNodeParams: *const hipKernelNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a kernel node in the given graphExec. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - const pointer to the kernel node parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecKernelNodeSetParams( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + pNodeParams: *const hipKernelNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memcpy node and adds it to a graph. + + @param [out] phGraphNode - pointer to graph node to create. + @param [in] hGraph - instance of graph to add the created node. + @param [in] dependencies - const pointer to the dependencies on the memcpy execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] copyParams - const pointer to the parameters for the memory copy. + @param [in] ctx - cotext related to current device. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDrvGraphAddMemcpyNode( + phGraphNode: *mut hipGraphNode_t, + hGraph: hipGraph_t, + dependencies: *const hipGraphNode_t, + numDependencies: usize, + copyParams: *const HIP_MEMCPY3D, + ctx: hipCtx_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memcpy node and adds it to a graph. + + @param [out] pGraphNode - pointer to graph node to create. + @param [in] graph - instance of graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] pCopyParams - const pointer to the parameters for the memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemcpyNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + pCopyParams: *const hipMemcpy3DParms, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets a memcpy node's parameters. + + @param [in] node - instance of the node to get parameters from. + @param [out] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemcpyNodeGetParams( + node: hipGraphNode_t, + pNodeParams: *mut hipMemcpy3DParms, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a memcpy node's parameters. + + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - const pointer to the parameters. 
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemcpyNodeSetParams( + node: hipGraphNode_t, + pNodeParams: *const hipMemcpy3DParms, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a node attribute. + + @param [in] hNode - instance of the node to set parameters to. + @param [in] attr - the attribute node is set to. + @param [in] value - const pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphKernelNodeSetAttribute( + hNode: hipGraphNode_t, + attr: hipLaunchAttributeID, + value: *const hipLaunchAttributeValue, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets a node attribute. + + @param [in] hNode - instance of the node to set parameters to. + @param [in] attr - the attribute node is set to. + @param [in] value - const pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphKernelNodeGetAttribute( + hNode: hipGraphNode_t, + attr: hipLaunchAttributeID, + value: *mut hipLaunchAttributeValue, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a memcpy node in the given graphExec. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - const pointer to the kernel node parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecMemcpyNodeSetParams( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + pNodeParams: *mut hipMemcpy3DParms, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a 1D memcpy node and adds it to a graph. + + @param [out] pGraphNode - pointer to graph node to create. + @param [in] graph - instance of graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] dst - pointer to memory address to the destination. + @param [in] src - pointer to memory address to the source. + @param [in] count - the size of the memory to copy. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemcpyNode1D( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + count: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a memcpy node's parameters to perform a 1-dimensional copy. + + @param [in] node - instance of the node to set parameters to. + @param [in] dst - pointer to memory address to the destination. + @param [in] src - pointer to memory address to the source. 
+ @param [in] count - the size of the memory to copy. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemcpyNodeSetParams1D( + node: hipGraphNode_t, + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + count: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a memcpy node in the given graphExec to perform a 1-dimensional + copy. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. + @param [in] dst - pointer to memory address to the destination. + @param [in] src - pointer to memory address to the source. + @param [in] count - the size of the memory to copy. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecMemcpyNodeSetParams1D( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + count: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph. + + @param [out] pGraphNode - pointer to graph node to create. + @param [in] graph - instance of graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] dst - pointer to memory address to the destination. + @param [in] symbol - Device symbol address. + @param [in] count - the size of the memory to copy. + @param [in] offset - Offset from start of symbol in bytes. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemcpyNodeFromSymbol( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + count: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a memcpy node's parameters to copy from a symbol on the device. + + @param [in] node - instance of the node to set parameters to. + @param [in] dst - pointer to memory address to the destination. + @param [in] symbol - Device symbol address. + @param [in] count - the size of the memory to copy. + @param [in] offset - Offset from start of symbol in bytes. + @param [in] kind - the type of memory copy. 
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemcpyNodeSetParamsFromSymbol( + node: hipGraphNode_t, + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + count: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the + * device. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. + @param [in] dst - pointer to memory address to the destination. + @param [in] symbol - Device symbol address. + @param [in] count - the size of the memory to copy. + @param [in] offset - Offset from start of symbol in bytes. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecMemcpyNodeSetParamsFromSymbol( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + count: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph. + + @param [out] pGraphNode - pointer to graph node to create. + @param [in] graph - instance of graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memcpy execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] symbol - Device symbol address. + @param [in] src - pointer to memory address of the src. + @param [in] count - the size of the memory to copy. + @param [in] offset - Offset from start of symbol in bytes. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemcpyNodeToSymbol( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + count: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a memcpy node's parameters to copy to a symbol on the device. + + @param [in] node - instance of the node to set parameters to. + @param [in] symbol - Device symbol address. + @param [in] src - pointer to memory address of the src. + @param [in] count - the size of the memory to copy. + @param [in] offset - Offset from start of symbol in bytes. + @param [in] kind - the type of memory copy. 
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemcpyNodeSetParamsToSymbol( + node: hipGraphNode_t, + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + count: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the + device. + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. + @param [in] symbol - Device symbol address. + @param [in] src - pointer to memory address of the src. + @param [in] count - the size of the memory to copy. + @param [in] offset - Offset from start of symbol in bytes. + @param [in] kind - the type of memory copy. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecMemcpyNodeSetParamsToSymbol( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + count: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memset node and adds it to a graph. + + @param [out] pGraphNode - pointer to the graph node to create. + @param [in] graph - instance of the graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memset execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] pMemsetParams - const pointer to the parameters for the memory set. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemsetNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + pMemsetParams: *const hipMemsetParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets a memset node's parameters. + + @param [in] node - instane of the node to get parameters from. + @param [out] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemsetNodeGetParams( + node: hipGraphNode_t, + pNodeParams: *mut hipMemsetParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a memset node's parameters. + + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemsetNodeSetParams( + node: hipGraphNode_t, + pNodeParams: *const hipMemsetParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a memset node in the given graphExec. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. 
+ @param [in] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecMemsetNodeSetParams( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + pNodeParams: *const hipMemsetParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a host execution node and adds it to a graph. + + @param [out] pGraphNode - pointer to the graph node to create. + @param [in] graph - instance of the graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memset execution node. + @param [in] numDependencies - the number of the dependencies. + @param [in] pNodeParams -pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddHostNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + pNodeParams: *const hipHostNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns a host node's parameters. + + @param [in] node - instane of the node to get parameters from. + @param [out] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphHostNodeGetParams( + node: hipGraphNode_t, + pNodeParams: *mut hipHostNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets a host node's parameters. + + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphHostNodeSetParams( + node: hipGraphNode_t, + pNodeParams: *const hipHostNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the parameters for a host node in the given graphExec. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] node - instance of the node to set parameters to. + @param [in] pNodeParams - pointer to the parameters. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecHostNodeSetParams( + hGraphExec: hipGraphExec_t, + node: hipGraphNode_t, + pNodeParams: *const hipHostNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a child graph node and adds it to a graph. + + @param [out] pGraphNode - pointer to the graph node to create. + @param [in] graph - instance of the graph to add the created node. + @param [in] pDependencies - const pointer to the dependencies on the memset execution node. + @param [in] numDependencies - the number of the dependencies. 
+ @param [in] childGraph - the graph to clone into this node
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphAddChildGraphNode(
+ pGraphNode: *mut hipGraphNode_t,
+ graph: hipGraph_t,
+ pDependencies: *const hipGraphNode_t,
+ numDependencies: usize,
+ childGraph: hipGraph_t,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Gets a handle to the embedded graph of a child graph node.
+
+ @param [in] node - instance of the node to get the child graph from.
+ @param [out] pGraph - pointer to get the graph.
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphChildGraphNodeGetGraph(
+ node: hipGraphNode_t,
+ pGraph: *mut hipGraph_t,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Updates node parameters in the child graph node in the given graphExec.
+
+ @param [in] hGraphExec - instance of the executable graph with the node.
+ @param [in] node - node from the graph which was used to instantiate graphExec.
+ @param [in] childGraph - child graph with updated parameters.
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphExecChildGraphNodeSetParams(
+ hGraphExec: hipGraphExec_t,
+ node: hipGraphNode_t,
+ childGraph: hipGraph_t,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Creates an empty node and adds it to a graph.
+
+ @param [out] pGraphNode - pointer to the graph node to create and add to the graph.
+ @param [in] graph - instance of the graph the node is added to.
+ @param [in] pDependencies - const pointer to the node dependencies.
+ @param [in] numDependencies - the number of dependencies.
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphAddEmptyNode(
+ pGraphNode: *mut hipGraphNode_t,
+ graph: hipGraph_t,
+ pDependencies: *const hipGraphNode_t,
+ numDependencies: usize,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Creates an event record node and adds it to a graph.
+
+ @param [out] pGraphNode - pointer to the graph node to create and add to the graph.
+ @param [in] graph - instance of the graph the node is to be added to.
+ @param [in] pDependencies - const pointer to the node dependencies.
+ @param [in] numDependencies - the number of dependencies.
+ @param [in] event - Event for the node.
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphAddEventRecordNode(
+ pGraphNode: *mut hipGraphNode_t,
+ graph: hipGraph_t,
+ pDependencies: *const hipGraphNode_t,
+ numDependencies: usize,
+ event: hipEvent_t,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Returns the event associated with an event record node.
+
+ @param [in] node - instance of the node to get the event from.
+ @param [out] event_out - Pointer to return the event.
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphEventRecordNodeGetEvent( + node: hipGraphNode_t, + event_out: *mut hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets an event record node's event. + + @param [in] node - instane of the node to set event to. + @param [in] event - pointer to the event. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphEventRecordNodeSetEvent( + node: hipGraphNode_t, + event: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the event for an event record node in the given graphExec. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] hNode - node from the graph which was used to instantiate graphExec. + @param [in] event - pointer to the event. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecEventRecordNodeSetEvent( + hGraphExec: hipGraphExec_t, + hNode: hipGraphNode_t, + event: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates an event wait node and adds it to a graph. + + @param [out] pGraphNode - pointer to the graph node to create and add to the graph. + @param [in] graph - instane of the graph the node to be added. + @param [in] pDependencies - const pointer to the node dependenties. + @param [in] numDependencies - the number of dependencies. + @param [in] event - Event for the node. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddEventWaitNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + event: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the event associated with an event wait node. + + @param [in] node - instane of the node to get event from. + @param [out] event_out - Pointer to return the event. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphEventWaitNodeGetEvent( + node: hipGraphNode_t, + event_out: *mut hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets an event wait node's event. + + @param [in] node - instane of the node to set event to. + @param [in] event - pointer to the event. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphEventWaitNodeSetEvent( + node: hipGraphNode_t, + event: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Sets the event for an event record node in the given graphExec. + + @param [in] hGraphExec - instance of the executable graph with the node. + @param [in] hNode - node from the graph which was used to instantiate graphExec. 
+ @param [in] event - pointer to the event. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecEventWaitNodeSetEvent( + hGraphExec: hipGraphExec_t, + hNode: hipGraphNode_t, + event: hipEvent_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memory allocation node and adds it to a graph + + @param [out] pGraphNode - Pointer to the graph node to create and add to the graph + @param [in] graph - Instane of the graph the node to be added + @param [in] pDependencies - Const pointer to the node dependenties + @param [in] numDependencies - The number of dependencies + @param [in] pNodeParams - Node parameters for memory allocation + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemAllocNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + pNodeParams: *mut hipMemAllocNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns parameters for memory allocation node + + @param [in] node - Memory allocation node for a query + @param [out] pNodeParams - Parameters for the specified memory allocation node + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemAllocNodeGetParams( + node: hipGraphNode_t, + pNodeParams: *mut hipMemAllocNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memory free node and adds it to a graph + + @param [out] pGraphNode - Pointer to the graph node to create and add to the graph + @param [in] graph - Instane of the graph the node to be added + @param [in] pDependencies - Const pointer to the node dependenties + @param [in] numDependencies - The number of dependencies + @param [in] dev_ptr - Pointer to the memory to be freed + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphAddMemFreeNode( + pGraphNode: *mut hipGraphNode_t, + graph: hipGraph_t, + pDependencies: *const hipGraphNode_t, + numDependencies: usize, + dev_ptr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns parameters for memory free node + + @param [in] node - Memory free node for a query + @param [out] dev_ptr - Device pointer for the specified memory free node + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphMemFreeNodeGetParams( + node: hipGraphNode_t, + dev_ptr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the mem attribute for graphs. + + @param [in] device - device the attr is get for. + @param [in] attr - attr to get. + @param [out] value - value for specific attr. 
+ @returns #hipSuccess, #hipErrorInvalidDevice + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDeviceGetGraphMemAttribute( + device: ::core::ffi::c_int, + attr: hipGraphMemAttributeType, + value: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set the mem attribute for graphs. + + @param [in] device - device the attr is set for. + @param [in] attr - attr to set. + @param [in] value - value for specific attr. + @returns #hipSuccess, #hipErrorInvalidDevice + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDeviceSetGraphMemAttribute( + device: ::core::ffi::c_int, + attr: hipGraphMemAttributeType, + value: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Free unused memory on specific device used for graph back to OS. + + @param [in] device - device the memory is used for graphs + @returns #hipSuccess, #hipErrorInvalidDevice + + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDeviceGraphMemTrim(device: ::core::ffi::c_int) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create an instance of userObject to manage lifetime of a resource. + + @param [out] object_out - pointer to instace of userobj. + @param [in] ptr - pointer to pass to destroy function. + @param [in] destroy - destroy callback to remove resource. + @param [in] initialRefcount - reference to resource. + @param [in] flags - flags passed to API. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipUserObjectCreate( + object_out: *mut hipUserObject_t, + ptr: *mut ::core::ffi::c_void, + destroy: hipHostFn_t, + initialRefcount: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Release number of references to resource. + + @param [in] object - pointer to instace of userobj. + @param [in] count - reference to resource to be retained. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipUserObjectRelease( + object: hipUserObject_t, + count: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Retain number of references to resource. + + @param [in] object - pointer to instace of userobj. + @param [in] count - reference to resource to be retained. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipUserObjectRetain( + object: hipUserObject_t, + count: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Retain user object for graphs. + + @param [in] graph - pointer to graph to retain the user object for. + @param [in] object - pointer to instace of userobj. + @param [in] count - reference to resource to be retained. + @param [in] flags - flags passed to API. 
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphRetainUserObject( + graph: hipGraph_t, + object: hipUserObject_t, + count: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Release user object from graphs. + + @param [in] graph - pointer to graph to retain the user object for. + @param [in] object - pointer to instace of userobj. + @param [in] count - reference to resource to be retained. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphReleaseUserObject( + graph: hipGraph_t, + object: hipUserObject_t, + count: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Write a DOT file describing graph structure. + + @param [in] graph - graph object for which DOT file has to be generated. + @param [in] path - path to write the DOT file. + @param [in] flags - Flags from hipGraphDebugDotFlags to get additional node information. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorOperatingSystem + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphDebugDotPrint( + graph: hipGraph_t, + path: *const ::core::ffi::c_char, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Copies attributes from source node to destination node. + + Copies attributes from source node to destination node. + Both node must have the same context. + + @param [out] hDst - Destination node. + @param [in] hSrc - Source node. + For list of attributes see ::hipKernelNodeAttrID. + + @returns #hipSuccess, #hipErrorInvalidContext + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphKernelNodeCopyAttributes( + hSrc: hipGraphNode_t, + hDst: hipGraphNode_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Enables or disables the specified node in the given graphExec + + Sets hNode to be either enabled or disabled. Disabled nodes are functionally equivalent + to empty nodes until they are reenabled. Existing node parameters are not affected by + disabling/enabling the node. + + The node is identified by the corresponding hNode in the non-executable graph, from which the + executable graph was instantiated. + + hNode must not have been removed from the original graph. + + @note Currently only kernel, memset and memcpy nodes are supported. + + @param [in] hGraphExec - The executable graph in which to set the specified node. + @param [in] hNode - Node from the graph from which graphExec was instantiated. + @param [in] isEnabled - Node is enabled if != 0, otherwise the node is disabled. 
+
+ @returns #hipSuccess, #hipErrorInvalidValue,
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphNodeSetEnabled(
+ hGraphExec: hipGraphExec_t,
+ hNode: hipGraphNode_t,
+ isEnabled: ::core::ffi::c_uint,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Query whether a node in the given graphExec is enabled
+
+ Sets isEnabled to 1 if hNode is enabled, or 0 if it is disabled.
+
+ The node is identified by the corresponding node in the non-executable graph, from which the
+ executable graph was instantiated.
+
+ hNode must not have been removed from the original graph.
+
+ @note Currently only kernel, memset and memcpy nodes are supported.
+
+ @param [in] hGraphExec - The executable graph in which to set the specified node.
+ @param [in] hNode - Node from the graph from which graphExec was instantiated.
+ @param [out] isEnabled - Location to return the enabled status of the node.
+
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphNodeGetEnabled(
+ hGraphExec: hipGraphExec_t,
+ hNode: hipGraphNode_t,
+ isEnabled: *mut ::core::ffi::c_uint,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Creates an external semaphore wait node and adds it to a graph.
+
+ @param [out] pGraphNode - pointer to the graph node to create.
+ @param [in] graph - instance of the graph to add the created node.
+ @param [in] pDependencies - const pointer to the dependencies on the external semaphore wait node.
+ @param [in] numDependencies - the number of the dependencies.
+ @param [in] nodeParams - pointer to the parameters.
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphAddExternalSemaphoresWaitNode(
+ pGraphNode: *mut hipGraphNode_t,
+ graph: hipGraph_t,
+ pDependencies: *const hipGraphNode_t,
+ numDependencies: usize,
+ nodeParams: *const hipExternalSemaphoreWaitNodeParams,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Creates an external semaphore signal node and adds it to a graph.
+
+ @param [out] pGraphNode - pointer to the graph node to create.
+ @param [in] graph - instance of the graph to add the created node.
+ @param [in] pDependencies - const pointer to the dependencies on the external semaphore signal node.
+ @param [in] numDependencies - the number of the dependencies.
+ @param [in] nodeParams - pointer to the parameters.
+ @returns #hipSuccess, #hipErrorInvalidValue
+ @warning : This API is marked as beta, meaning, while this is feature complete,
+ it is still open to changes and may have outstanding issues.*/
+ pub fn hipGraphAddExternalSemaphoresSignalNode(
+ pGraphNode: *mut hipGraphNode_t,
+ graph: hipGraph_t,
+ pDependencies: *const hipGraphNode_t,
+ numDependencies: usize,
+ nodeParams: *const hipExternalSemaphoreSignalNodeParams,
+ ) -> hipError_t;
+}
+extern "C" {
+ #[must_use]
+ /** @brief Updates node parameters in the external semaphore signal node.
+
+ @param [in] hNode - Node from the graph from which graphExec was instantiated.
+ @param [in] nodeParams - Pointer to the params to be set.
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExternalSemaphoresSignalNodeSetParams( + hNode: hipGraphNode_t, + nodeParams: *const hipExternalSemaphoreSignalNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Updates node parameters in the external semaphore wait node. + + @param [in] hNode - Node from the graph from which graphExec was instantiated. + @param [in] nodeParams - Pointer to the params to be set. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExternalSemaphoresWaitNodeSetParams( + hNode: hipGraphNode_t, + nodeParams: *const hipExternalSemaphoreWaitNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns external semaphore signal node params. + + @param [in] hNode - Node from the graph from which graphExec was instantiated. + @param [out] params_out - Pointer to params. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExternalSemaphoresSignalNodeGetParams( + hNode: hipGraphNode_t, + params_out: *mut hipExternalSemaphoreSignalNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns external semaphore wait node params. + + @param [in] hNode - Node from the graph from which graphExec was instantiated. + @param [out] params_out - Pointer to params. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExternalSemaphoresWaitNodeGetParams( + hNode: hipGraphNode_t, + params_out: *mut hipExternalSemaphoreWaitNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Updates node parameters in the external semaphore signal node in the given graphExec. + + @param [in] hGraphExec - The executable graph in which to set the specified node. + @param [in] hNode - Node from the graph from which graphExec was instantiated. + @param [in] nodeParams - Pointer to the params to be set. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecExternalSemaphoresSignalNodeSetParams( + hGraphExec: hipGraphExec_t, + hNode: hipGraphNode_t, + nodeParams: *const hipExternalSemaphoreSignalNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Updates node parameters in the external semaphore wait node in the given graphExec. + + @param [in] hGraphExec - The executable graph in which to set the specified node. + @param [in] hNode - Node from the graph from which graphExec was instantiated. + @param [in] nodeParams - Pointer to the params to be set. 
+ @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipGraphExecExternalSemaphoresWaitNodeSetParams( + hGraphExec: hipGraphExec_t, + hNode: hipGraphNode_t, + nodeParams: *const hipExternalSemaphoreWaitNodeParams, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memset node and adds it to a graph. + + @param [out] phGraphNode - pointer to graph node to create. + @param [in] hGraph - instance of graph to add the created node to. + @param [in] dependencies - const pointer to the dependencies on the memset execution node. + @param [in] numDependencies - number of the dependencies. + @param [in] memsetParams - const pointer to the parameters for the memory set. + @param [in] ctx - cotext related to current device. + @returns #hipSuccess, #hipErrorInvalidValue + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues.*/ + pub fn hipDrvGraphAddMemsetNode( + phGraphNode: *mut hipGraphNode_t, + hGraph: hipGraph_t, + dependencies: *const hipGraphNode_t, + numDependencies: usize, + memsetParams: *const HIP_MEMSET_NODE_PARAMS, + ctx: hipCtx_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Frees an address range reservation made via hipMemAddressReserve + + @param [in] devPtr - starting address of the range. + @param [in] size - size of the range. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemAddressFree( + devPtr: *mut ::core::ffi::c_void, + size: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Reserves an address range + + @param [out] ptr - starting address of the reserved range. + @param [in] size - size of the reservation. + @param [in] alignment - alignment of the address. + @param [in] addr - requested starting address of the range. + @param [in] flags - currently unused, must be zero. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemAddressReserve( + ptr: *mut *mut ::core::ffi::c_void, + size: usize, + alignment: usize, + addr: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Creates a memory allocation described by the properties and size + + @param [out] handle - value of the returned handle. + @param [in] size - size of the allocation. + @param [in] prop - properties of the allocation. + @param [in] flags - currently unused, must be zero. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. 
+ + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemCreate( + handle: *mut hipMemGenericAllocationHandle_t, + size: usize, + prop: *const hipMemAllocationProp, + flags: ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Exports an allocation to a requested shareable handle type. + + @param [out] shareableHandle - value of the returned handle. + @param [in] handle - handle to share. + @param [in] handleType - type of the shareable handle. + @param [in] flags - currently unused, must be zero. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemExportToShareableHandle( + shareableHandle: *mut ::core::ffi::c_void, + handle: hipMemGenericAllocationHandle_t, + handleType: hipMemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get the access flags set for the given location and ptr. + + @param [out] flags - flags for this location. + @param [in] location - target location. + @param [in] ptr - address to check the access flags. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemGetAccess( + flags: *mut ::core::ffi::c_ulonglong, + location: *const hipMemLocation, + ptr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Calculates either the minimal or recommended granularity. + + @param [out] granularity - returned granularity. + @param [in] prop - location properties. + @param [in] option - determines which granularity to return. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows. +*/ + pub fn hipMemGetAllocationGranularity( + granularity: *mut usize, + prop: *const hipMemAllocationProp, + option: hipMemAllocationGranularity_flags, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Retrieve the property structure of the given handle. + + @param [out] prop - properties of the given handle. + @param [in] handle - handle to perform the query on. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux under development on Windows.*/ + pub fn hipMemGetAllocationPropertiesFromHandle( + prop: *mut hipMemAllocationProp, + handle: hipMemGenericAllocationHandle_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Imports an allocation from a requested shareable handle type. + + @param [out] handle - returned value. + @param [in] osHandle - shareable handle representing the memory allocation. + @param [in] shHandleType - handle type. 
+ @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemImportFromShareableHandle( + handle: *mut hipMemGenericAllocationHandle_t, + osHandle: *mut ::core::ffi::c_void, + shHandleType: hipMemAllocationHandleType, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Maps an allocation handle to a reserved virtual address range. + + @param [in] ptr - address where the memory will be mapped. + @param [in] size - size of the mapping. + @param [in] offset - offset into the memory, currently must be zero. + @param [in] handle - memory allocation to be mapped. + @param [in] flags - currently unused, must be zero. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemMap( + ptr: *mut ::core::ffi::c_void, + size: usize, + offset: usize, + handle: hipMemGenericAllocationHandle_t, + flags: ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Maps or unmaps subregions of sparse HIP arrays and sparse HIP mipmapped arrays. + + @param [in] mapInfoList - list of hipArrayMapInfo. + @param [in] count - number of hipArrayMapInfo in mapInfoList. + @param [in] stream - stream identifier for the stream to use for map or unmap operations. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemMapArrayAsync( + mapInfoList: *mut hipArrayMapInfo, + count: ::core::ffi::c_uint, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Release a memory handle representing a memory allocation which was previously allocated through hipMemCreate. + + @param [in] handle - handle of the memory allocation. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemRelease(handle: hipMemGenericAllocationHandle_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Returns the allocation handle of the backing memory allocation given the address. + + @param [out] handle - handle representing addr. + @param [in] addr - address to look up. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemRetainAllocationHandle( + handle: *mut hipMemGenericAllocationHandle_t, + addr: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Set the access flags for each location specified in desc for the given virtual address range. + + @param [in] ptr - starting address of the virtual address range. 
+ @param [in] size - size of the range. + @param [in] desc - array of hipMemAccessDesc. + @param [in] count - number of hipMemAccessDesc in desc. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemSetAccess( + ptr: *mut ::core::ffi::c_void, + size: usize, + desc: *const hipMemAccessDesc, + count: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Unmap memory allocation of a given address range. + + @param [in] ptr - starting address of the range to unmap. + @param [in] size - size of the virtual address range. + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorNotSupported + @warning : This API is marked as beta, meaning, while this is feature complete, + it is still open to changes and may have outstanding issues. + + @note This API is implemented on Linux, under development on Windows.*/ + pub fn hipMemUnmap(ptr: *mut ::core::ffi::c_void, size: usize) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Maps a graphics resource for access. + + @param [in] count - Number of resources to map. + @param [in] resources - Pointer of resources to map. + @param [in] stream - Stream for synchronization. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown, #hipErrorInvalidResourceHandle +*/ + pub fn hipGraphicsMapResources( + count: ::core::ffi::c_int, + resources: *mut hipGraphicsResource_t, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Get an array through which to access a subresource of a mapped graphics resource. + + @param [out] array - Pointer of array through which a subresource of resource may be accessed. + @param [in] resource - Mapped resource to access. + @param [in] arrayIndex - Array index for the subresource to access. + @param [in] mipLevel - Mipmap level for the subresource to access. + + @returns #hipSuccess, #hipErrorInvalidValue + + @note In this API, the value of arrayIndex higher than zero is currently not supported. +*/ + pub fn hipGraphicsSubResourceGetMappedArray( + array: *mut hipArray_t, + resource: hipGraphicsResource_t, + arrayIndex: ::core::ffi::c_uint, + mipLevel: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Gets device accessible address of a graphics resource. + + @param [out] devPtr - Pointer of device through which graphic resource may be accessed. + @param [out] size - Size of the buffer accessible from devPtr. + @param [in] resource - Mapped resource to access. + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipGraphicsResourceGetMappedPointer( + devPtr: *mut *mut ::core::ffi::c_void, + size: *mut usize, + resource: hipGraphicsResource_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Unmaps graphics resources. + + @param [in] count - Number of resources to unmap. + @param [in] resources - Pointer of resources to unmap. + @param [in] stream - Stream for synchronization. + + @returns #hipSuccess, #hipErrorInvalidValue, #hipErrorUnknown, #hipErrorContextIsDestroyed +*/ + pub fn hipGraphicsUnmapResources( + count: ::core::ffi::c_int, + resources: *mut hipGraphicsResource_t, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Unregisters a graphics resource. + + @param [in] resource - Graphics resources to unregister. 
+ + @returns #hipSuccess +*/ + pub fn hipGraphicsUnregisterResource(resource: hipGraphicsResource_t) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Create a surface object. + + @param [out] pSurfObject Pointer of surface object to be created. + @param [in] pResDesc Pointer of suface object descriptor. + + @returns #hipSuccess, #hipErrorInvalidValue +*/ + pub fn hipCreateSurfaceObject( + pSurfObject: *mut hipSurfaceObject_t, + pResDesc: *const hipResourceDesc, + ) -> hipError_t; +} +extern "C" { + #[must_use] + /** @brief Destroy a surface object. + + @param [in] surfaceObject Surface object to be destroyed. + + @returns #hipSuccess, #hipErrorInvalidValue*/ + pub fn hipDestroySurfaceObject(surfaceObject: hipSurfaceObject_t) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy_spt( + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpyToSymbol_spt( + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpyFromSymbol_spt( + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy2D_spt( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy2DFromArray_spt( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: hipArray_const_t, + wOffset: usize, + hOffset: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy3D_spt(p: *const hipMemcpy3DParms) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemset_spt( + dst: *mut ::core::ffi::c_void, + value: ::core::ffi::c_int, + sizeBytes: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemsetAsync_spt( + dst: *mut ::core::ffi::c_void, + value: ::core::ffi::c_int, + sizeBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemset2D_spt( + dst: *mut ::core::ffi::c_void, + pitch: usize, + value: ::core::ffi::c_int, + width: usize, + height: usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemset2DAsync_spt( + dst: *mut ::core::ffi::c_void, + pitch: usize, + value: ::core::ffi::c_int, + width: usize, + height: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemset3DAsync_spt( + pitchedDevPtr: hipPitchedPtr, + value: ::core::ffi::c_int, + extent: hipExtent, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemset3D_spt( + pitchedDevPtr: hipPitchedPtr, + value: ::core::ffi::c_int, + extent: hipExtent, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpyAsync_spt( + dst: *mut ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy3DAsync_spt( + p: *const hipMemcpy3DParms, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy2DAsync_spt( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: *const ::core::ffi::c_void, + spitch: 
usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpyFromSymbolAsync_spt( + dst: *mut ::core::ffi::c_void, + symbol: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpyToSymbolAsync_spt( + symbol: *const ::core::ffi::c_void, + src: *const ::core::ffi::c_void, + sizeBytes: usize, + offset: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpyFromArray_spt( + dst: *mut ::core::ffi::c_void, + src: hipArray_const_t, + wOffsetSrc: usize, + hOffset: usize, + count: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy2DToArray_spt( + dst: hipArray_t, + wOffset: usize, + hOffset: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy2DFromArrayAsync_spt( + dst: *mut ::core::ffi::c_void, + dpitch: usize, + src: hipArray_const_t, + wOffsetSrc: usize, + hOffsetSrc: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipMemcpy2DToArrayAsync_spt( + dst: hipArray_t, + wOffset: usize, + hOffset: usize, + src: *const ::core::ffi::c_void, + spitch: usize, + width: usize, + height: usize, + kind: hipMemcpyKind, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamQuery_spt(stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamSynchronize_spt(stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamGetPriority_spt( + stream: hipStream_t, + priority: *mut ::core::ffi::c_int, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamWaitEvent_spt( + stream: hipStream_t, + event: hipEvent_t, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamGetFlags_spt( + stream: hipStream_t, + flags: *mut ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamAddCallback_spt( + stream: hipStream_t, + callback: hipStreamCallback_t, + userData: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipEventRecord_spt(event: hipEvent_t, stream: hipStream_t) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipLaunchCooperativeKernel_spt( + f: *const ::core::ffi::c_void, + gridDim: dim3, + blockDim: dim3, + kernelParams: *mut *mut ::core::ffi::c_void, + sharedMemBytes: u32, + hStream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipLaunchKernel_spt( + function_address: *const ::core::ffi::c_void, + numBlocks: dim3, + dimBlocks: dim3, + args: *mut *mut ::core::ffi::c_void, + sharedMemBytes: usize, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipGraphLaunch_spt( + graphExec: hipGraphExec_t, + stream: hipStream_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamBeginCapture_spt( + stream: hipStream_t, + mode: hipStreamCaptureMode, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamEndCapture_spt( + stream: hipStream_t, + pGraph: *mut hipGraph_t, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamIsCapturing_spt( + 
stream: hipStream_t, + pCaptureStatus: *mut hipStreamCaptureStatus, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamGetCaptureInfo_spt( + stream: hipStream_t, + pCaptureStatus: *mut hipStreamCaptureStatus, + pId: *mut ::core::ffi::c_ulonglong, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipStreamGetCaptureInfo_v2_spt( + stream: hipStream_t, + captureStatus_out: *mut hipStreamCaptureStatus, + id_out: *mut ::core::ffi::c_ulonglong, + graph_out: *mut hipGraph_t, + dependencies_out: *mut *const hipGraphNode_t, + numDependencies_out: *mut usize, + ) -> hipError_t; +} +extern "C" { + #[must_use] + pub fn hipLaunchHostFunc_spt( + stream: hipStream_t, + fn_: hipHostFn_t, + userData: *mut ::core::ffi::c_void, + ) -> hipError_t; +} +impl hipErrorCode_t { + pub const InvalidValue: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(1) + }); + pub const OutOfMemory: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(2) + }); + pub const MemoryAllocation: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(2) + }); + pub const NotInitialized: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(3) + }); + pub const InitializationError: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(3) + }); + pub const Deinitialized: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(4) + }); + pub const ProfilerDisabled: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(5) + }); + pub const ProfilerNotInitialized: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(6) + }); + pub const ProfilerAlreadyStarted: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(7) + }); + pub const ProfilerAlreadyStopped: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(8) + }); + pub const InvalidConfiguration: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(9) + }); + pub const InvalidPitchValue: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(12) + }); + pub const InvalidSymbol: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(13) + }); + pub const InvalidDevicePointer: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(17) + }); + pub const InvalidMemcpyDirection: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(21) + }); + pub const InsufficientDriver: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(35) + }); + pub const MissingConfiguration: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(52) + }); + pub const PriorLaunchFailure: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(53) + }); + pub const InvalidDeviceFunction: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(98) + }); + pub const NoDevice: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(100) + }); + pub const InvalidDevice: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(101) + }); + pub const InvalidImage: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(200) + }); + pub const InvalidContext: hipErrorCode_t = hipErrorCode_t(unsafe { + 
::core::num::NonZeroU32::new_unchecked(201) + }); + pub const ContextAlreadyCurrent: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(202) + }); + pub const MapFailed: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(205) + }); + pub const MapBufferObjectFailed: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(205) + }); + pub const UnmapFailed: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(206) + }); + pub const ArrayIsMapped: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(207) + }); + pub const AlreadyMapped: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(208) + }); + pub const NoBinaryForGpu: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(209) + }); + pub const AlreadyAcquired: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(210) + }); + pub const NotMapped: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(211) + }); + pub const NotMappedAsArray: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(212) + }); + pub const NotMappedAsPointer: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(213) + }); + pub const ECCNotCorrectable: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(214) + }); + pub const UnsupportedLimit: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(215) + }); + pub const ContextAlreadyInUse: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(216) + }); + pub const PeerAccessUnsupported: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(217) + }); + pub const InvalidKernelFile: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(218) + }); + pub const InvalidGraphicsContext: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(219) + }); + pub const InvalidSource: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(300) + }); + pub const FileNotFound: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(301) + }); + pub const SharedObjectSymbolNotFound: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(302) + }); + pub const SharedObjectInitFailed: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(303) + }); + pub const OperatingSystem: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(304) + }); + pub const InvalidHandle: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(400) + }); + pub const InvalidResourceHandle: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(400) + }); + pub const IllegalState: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(401) + }); + pub const NotFound: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(500) + }); + pub const NotReady: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(600) + }); + pub const IllegalAddress: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(700) + }); + pub const LaunchOutOfResources: hipErrorCode_t = hipErrorCode_t(unsafe 
{ + ::core::num::NonZeroU32::new_unchecked(701) + }); + pub const LaunchTimeOut: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(702) + }); + pub const PeerAccessAlreadyEnabled: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(704) + }); + pub const PeerAccessNotEnabled: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(705) + }); + pub const SetOnActiveProcess: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(708) + }); + pub const ContextIsDestroyed: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(709) + }); + pub const Assert: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(710) + }); + pub const HostMemoryAlreadyRegistered: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(712) + }); + pub const HostMemoryNotRegistered: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(713) + }); + pub const LaunchFailure: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(719) + }); + pub const CooperativeLaunchTooLarge: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(720) + }); + pub const NotSupported: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(801) + }); + pub const StreamCaptureUnsupported: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(900) + }); + pub const StreamCaptureInvalidated: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(901) + }); + pub const StreamCaptureMerge: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(902) + }); + pub const StreamCaptureUnmatched: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(903) + }); + pub const StreamCaptureUnjoined: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(904) + }); + pub const StreamCaptureIsolation: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(905) + }); + pub const StreamCaptureImplicit: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(906) + }); + pub const CapturedEvent: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(907) + }); + pub const StreamCaptureWrongThread: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(908) + }); + pub const GraphExecUpdateFailure: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(910) + }); + pub const Unknown: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(999) + }); + pub const RuntimeMemory: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(1052) + }); + pub const RuntimeOther: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(1053) + }); + pub const Tbd: hipErrorCode_t = hipErrorCode_t(unsafe { + ::core::num::NonZeroU32::new_unchecked(1054) + }); +} +#[repr(transparent)] +#[derive(Debug, Hash, Copy, Clone, PartialEq, Eq)] +pub struct hipErrorCode_t(pub ::core::num::NonZeroU32); +pub trait hipError_tConsts { + const Success: hipError_t = hipError_t::Ok(()); + const ErrorInvalidValue: hipError_t = hipError_t::Err(hipErrorCode_t::InvalidValue); + const ErrorOutOfMemory: hipError_t = 
hipError_t::Err(hipErrorCode_t::OutOfMemory); + const ErrorMemoryAllocation: hipError_t = hipError_t::Err( + hipErrorCode_t::MemoryAllocation, + ); + const ErrorNotInitialized: hipError_t = hipError_t::Err( + hipErrorCode_t::NotInitialized, + ); + const ErrorInitializationError: hipError_t = hipError_t::Err( + hipErrorCode_t::InitializationError, + ); + const ErrorDeinitialized: hipError_t = hipError_t::Err( + hipErrorCode_t::Deinitialized, + ); + const ErrorProfilerDisabled: hipError_t = hipError_t::Err( + hipErrorCode_t::ProfilerDisabled, + ); + const ErrorProfilerNotInitialized: hipError_t = hipError_t::Err( + hipErrorCode_t::ProfilerNotInitialized, + ); + const ErrorProfilerAlreadyStarted: hipError_t = hipError_t::Err( + hipErrorCode_t::ProfilerAlreadyStarted, + ); + const ErrorProfilerAlreadyStopped: hipError_t = hipError_t::Err( + hipErrorCode_t::ProfilerAlreadyStopped, + ); + const ErrorInvalidConfiguration: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidConfiguration, + ); + const ErrorInvalidPitchValue: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidPitchValue, + ); + const ErrorInvalidSymbol: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidSymbol, + ); + const ErrorInvalidDevicePointer: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidDevicePointer, + ); + const ErrorInvalidMemcpyDirection: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidMemcpyDirection, + ); + const ErrorInsufficientDriver: hipError_t = hipError_t::Err( + hipErrorCode_t::InsufficientDriver, + ); + const ErrorMissingConfiguration: hipError_t = hipError_t::Err( + hipErrorCode_t::MissingConfiguration, + ); + const ErrorPriorLaunchFailure: hipError_t = hipError_t::Err( + hipErrorCode_t::PriorLaunchFailure, + ); + const ErrorInvalidDeviceFunction: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidDeviceFunction, + ); + const ErrorNoDevice: hipError_t = hipError_t::Err(hipErrorCode_t::NoDevice); + const ErrorInvalidDevice: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidDevice, + ); + const ErrorInvalidImage: hipError_t = hipError_t::Err(hipErrorCode_t::InvalidImage); + const ErrorInvalidContext: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidContext, + ); + const ErrorContextAlreadyCurrent: hipError_t = hipError_t::Err( + hipErrorCode_t::ContextAlreadyCurrent, + ); + const ErrorMapFailed: hipError_t = hipError_t::Err(hipErrorCode_t::MapFailed); + const ErrorMapBufferObjectFailed: hipError_t = hipError_t::Err( + hipErrorCode_t::MapBufferObjectFailed, + ); + const ErrorUnmapFailed: hipError_t = hipError_t::Err(hipErrorCode_t::UnmapFailed); + const ErrorArrayIsMapped: hipError_t = hipError_t::Err( + hipErrorCode_t::ArrayIsMapped, + ); + const ErrorAlreadyMapped: hipError_t = hipError_t::Err( + hipErrorCode_t::AlreadyMapped, + ); + const ErrorNoBinaryForGpu: hipError_t = hipError_t::Err( + hipErrorCode_t::NoBinaryForGpu, + ); + const ErrorAlreadyAcquired: hipError_t = hipError_t::Err( + hipErrorCode_t::AlreadyAcquired, + ); + const ErrorNotMapped: hipError_t = hipError_t::Err(hipErrorCode_t::NotMapped); + const ErrorNotMappedAsArray: hipError_t = hipError_t::Err( + hipErrorCode_t::NotMappedAsArray, + ); + const ErrorNotMappedAsPointer: hipError_t = hipError_t::Err( + hipErrorCode_t::NotMappedAsPointer, + ); + const ErrorECCNotCorrectable: hipError_t = hipError_t::Err( + hipErrorCode_t::ECCNotCorrectable, + ); + const ErrorUnsupportedLimit: hipError_t = hipError_t::Err( + hipErrorCode_t::UnsupportedLimit, + ); + const ErrorContextAlreadyInUse: hipError_t = 
hipError_t::Err( + hipErrorCode_t::ContextAlreadyInUse, + ); + const ErrorPeerAccessUnsupported: hipError_t = hipError_t::Err( + hipErrorCode_t::PeerAccessUnsupported, + ); + const ErrorInvalidKernelFile: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidKernelFile, + ); + const ErrorInvalidGraphicsContext: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidGraphicsContext, + ); + const ErrorInvalidSource: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidSource, + ); + const ErrorFileNotFound: hipError_t = hipError_t::Err(hipErrorCode_t::FileNotFound); + const ErrorSharedObjectSymbolNotFound: hipError_t = hipError_t::Err( + hipErrorCode_t::SharedObjectSymbolNotFound, + ); + const ErrorSharedObjectInitFailed: hipError_t = hipError_t::Err( + hipErrorCode_t::SharedObjectInitFailed, + ); + const ErrorOperatingSystem: hipError_t = hipError_t::Err( + hipErrorCode_t::OperatingSystem, + ); + const ErrorInvalidHandle: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidHandle, + ); + const ErrorInvalidResourceHandle: hipError_t = hipError_t::Err( + hipErrorCode_t::InvalidResourceHandle, + ); + const ErrorIllegalState: hipError_t = hipError_t::Err(hipErrorCode_t::IllegalState); + const ErrorNotFound: hipError_t = hipError_t::Err(hipErrorCode_t::NotFound); + const ErrorNotReady: hipError_t = hipError_t::Err(hipErrorCode_t::NotReady); + const ErrorIllegalAddress: hipError_t = hipError_t::Err( + hipErrorCode_t::IllegalAddress, + ); + const ErrorLaunchOutOfResources: hipError_t = hipError_t::Err( + hipErrorCode_t::LaunchOutOfResources, + ); + const ErrorLaunchTimeOut: hipError_t = hipError_t::Err( + hipErrorCode_t::LaunchTimeOut, + ); + const ErrorPeerAccessAlreadyEnabled: hipError_t = hipError_t::Err( + hipErrorCode_t::PeerAccessAlreadyEnabled, + ); + const ErrorPeerAccessNotEnabled: hipError_t = hipError_t::Err( + hipErrorCode_t::PeerAccessNotEnabled, + ); + const ErrorSetOnActiveProcess: hipError_t = hipError_t::Err( + hipErrorCode_t::SetOnActiveProcess, + ); + const ErrorContextIsDestroyed: hipError_t = hipError_t::Err( + hipErrorCode_t::ContextIsDestroyed, + ); + const ErrorAssert: hipError_t = hipError_t::Err(hipErrorCode_t::Assert); + const ErrorHostMemoryAlreadyRegistered: hipError_t = hipError_t::Err( + hipErrorCode_t::HostMemoryAlreadyRegistered, + ); + const ErrorHostMemoryNotRegistered: hipError_t = hipError_t::Err( + hipErrorCode_t::HostMemoryNotRegistered, + ); + const ErrorLaunchFailure: hipError_t = hipError_t::Err( + hipErrorCode_t::LaunchFailure, + ); + const ErrorCooperativeLaunchTooLarge: hipError_t = hipError_t::Err( + hipErrorCode_t::CooperativeLaunchTooLarge, + ); + const ErrorNotSupported: hipError_t = hipError_t::Err(hipErrorCode_t::NotSupported); + const ErrorStreamCaptureUnsupported: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureUnsupported, + ); + const ErrorStreamCaptureInvalidated: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureInvalidated, + ); + const ErrorStreamCaptureMerge: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureMerge, + ); + const ErrorStreamCaptureUnmatched: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureUnmatched, + ); + const ErrorStreamCaptureUnjoined: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureUnjoined, + ); + const ErrorStreamCaptureIsolation: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureIsolation, + ); + const ErrorStreamCaptureImplicit: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureImplicit, + ); + const ErrorCapturedEvent: 
hipError_t = hipError_t::Err( + hipErrorCode_t::CapturedEvent, + ); + const ErrorStreamCaptureWrongThread: hipError_t = hipError_t::Err( + hipErrorCode_t::StreamCaptureWrongThread, + ); + const ErrorGraphExecUpdateFailure: hipError_t = hipError_t::Err( + hipErrorCode_t::GraphExecUpdateFailure, + ); + const ErrorUnknown: hipError_t = hipError_t::Err(hipErrorCode_t::Unknown); + const ErrorRuntimeMemory: hipError_t = hipError_t::Err( + hipErrorCode_t::RuntimeMemory, + ); + const ErrorRuntimeOther: hipError_t = hipError_t::Err(hipErrorCode_t::RuntimeOther); + const ErrorTbd: hipError_t = hipError_t::Err(hipErrorCode_t::Tbd); +} +impl hipError_tConsts for hipError_t {} +#[must_use] +pub type hipError_t = ::core::result::Result<(), hipErrorCode_t>; +const _: fn() = || { + let _ = std::mem::transmute::; +}; +unsafe impl Send for hipDeviceptr_t {} +unsafe impl Sync for hipDeviceptr_t {} +unsafe impl Send for hipStream_t {} +unsafe impl Sync for hipStream_t {} +unsafe impl Send for hipModule_t {} +unsafe impl Sync for hipModule_t {} +unsafe impl Send for hipFunction_t {} +unsafe impl Sync for hipFunction_t {} diff --git a/ext/spirv-headers b/ext/spirv-headers deleted file mode 160000 index 308bd074..00000000 --- a/ext/spirv-headers +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 308bd07424350a6000f35a77b5f85cd4f3da319e diff --git a/ext/spirv-tools b/ext/spirv-tools deleted file mode 160000 index e128ab0d..00000000 --- a/ext/spirv-tools +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e128ab0d624ce7beb08eb9656bb260c597a46d0a diff --git a/level_zero-sys/Cargo.toml b/level_zero-sys/Cargo.toml deleted file mode 100644 index 7f8b497a..00000000 --- a/level_zero-sys/Cargo.toml +++ /dev/null @@ -1,8 +0,0 @@ -[package] -name = "level_zero-sys" -version = "1.0.4" -authors = ["Andrzej Janik "] -edition = "2018" -links = "ze_loader" - -[lib] \ No newline at end of file diff --git a/level_zero-sys/README b/level_zero-sys/README deleted file mode 100644 index 16d29d77..00000000 --- a/level_zero-sys/README +++ /dev/null @@ -1,4 +0,0 @@ -sed 's/^typedef uint32_t ze_.*flags_t;$//g' include/ze_api.h > include/level_zero/ze_api.h -sed -i -r 's/ze_(.*)_flag_t/ze_\1_flags_t/g' include/level_zero/ze_api.h -bindgen --with-derive-default --no-default ".*format_t" --no-default ".*fd_t" --no-default ".*constants_t" --no-default ".*handle_t" --no-default ".*desc_t" --no-default ".*params_t" --size_t-is-usize --default-enum-style=newtype --bitfield-enum ".*flags_t" --whitelist-function "ze.*" --whitelist-type "ze.*" include/level_zero/ze_api.h -o src/ze_api.rs -- -Iinclude -sed -i 's/pub struct _ze_result_t/#[must_use]\npub struct _ze_result_t/g' src/ze_api.rs diff --git a/level_zero-sys/build.rs b/level_zero-sys/build.rs deleted file mode 100644 index 0d2488cd..00000000 --- a/level_zero-sys/build.rs +++ /dev/null @@ -1,17 +0,0 @@ -use env::VarError; -use std::{env, path::PathBuf}; - -fn main() -> Result<(), VarError> { - println!("cargo:rustc-link-lib=dylib=ze_loader"); - if cfg!(windows) { - let env = env::var("CARGO_CFG_TARGET_ENV")?; - if env == "msvc" { - let mut path = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?); - path.push("lib"); - println!("cargo:rustc-link-search=native={}", path.display()); - } else { - println!("cargo:rustc-link-search=native=C:\\Windows\\System32"); - }; - } - Ok(()) -} diff --git a/level_zero-sys/include/level_zero/.gitignore b/level_zero-sys/include/level_zero/.gitignore deleted file mode 100644 index d9e8bb71..00000000 --- a/level_zero-sys/include/level_zero/.gitignore +++ /dev/null @@ -1 
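The hunk above replaces the old enum-style hipError_t with a #[must_use] Result: hipErrorCode_t wraps a core::num::NonZeroU32, so Ok(()) takes the zero value and the Result keeps the footprint of a 32-bit status code, while the hipError_tConsts trait preserves the familiar hipError_t::ErrorXxx names. The sketch below shows how a caller might consume these generated items; it is illustrative only — the helper names zero_device_memory and describe_error are invented, and hipMemset_spt, hipError_t, and hipErrorCode_t refer to the declarations shown above.

use core::ffi::c_void;

/// # Safety
/// `dst` must be a valid device pointer with at least `size_bytes` writable bytes.
pub unsafe fn zero_device_memory(dst: *mut c_void, size_bytes: usize) -> hipError_t {
    // hipError_t is Result<(), hipErrorCode_t>, so `?` propagates the error code,
    // and #[must_use] flags call sites that silently drop a status.
    unsafe { hipMemset_spt(dst, 0, size_bytes) }?;
    Ok(())
}

pub fn describe_error(err: hipErrorCode_t) -> &'static str {
    // The associated constants defined above can be compared directly,
    // since hipErrorCode_t derives PartialEq/Eq.
    if err == hipErrorCode_t::OutOfMemory {
        "allocation failure"
    } else if err == hipErrorCode_t::NotSupported {
        "operation not supported"
    } else {
        "other HIP error"
    }
}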
+0,0 @@ -ze_api.h \ No newline at end of file diff --git a/level_zero-sys/include/ze_api.h b/level_zero-sys/include/ze_api.h deleted file mode 100644 index 6107f100..00000000 --- a/level_zero-sys/include/ze_api.h +++ /dev/null @@ -1,9273 +0,0 @@ -/* - * - * Copyright (C) 2019 Intel Corporation - * - * SPDX-License-Identifier: MIT - * - * @file ze_api.h - * @version v1.1-r1.1.10 - * - */ -#ifndef _ZE_API_H -#define _ZE_API_H -#if defined(__cplusplus) -#pragma once -#endif - -// standard headers -#include -#include - -#if defined(__cplusplus) -extern "C" { -#endif - -// Intel 'oneAPI' Level-Zero API common types -#if !defined(__GNUC__) -#pragma region common -#endif -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAKE_VERSION -/// @brief Generates generic 'oneAPI' API versions -#define ZE_MAKE_VERSION( _major, _minor ) (( _major << 16 )|( _minor & 0x0000ffff)) -#endif // ZE_MAKE_VERSION - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAJOR_VERSION -/// @brief Extracts 'oneAPI' API major version -#define ZE_MAJOR_VERSION( _ver ) ( _ver >> 16 ) -#endif // ZE_MAJOR_VERSION - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MINOR_VERSION -/// @brief Extracts 'oneAPI' API minor version -#define ZE_MINOR_VERSION( _ver ) ( _ver & 0x0000ffff ) -#endif // ZE_MINOR_VERSION - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_APICALL -#if defined(_WIN32) -/// @brief Calling convention for all API functions -#define ZE_APICALL __cdecl -#else -#define ZE_APICALL -#endif // defined(_WIN32) -#endif // ZE_APICALL - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_APIEXPORT -#if defined(_WIN32) -/// @brief Microsoft-specific dllexport storage-class attribute -#define ZE_APIEXPORT __declspec(dllexport) -#else -#define ZE_APIEXPORT -#endif // defined(_WIN32) -#endif // ZE_APIEXPORT - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_DLLEXPORT -#if defined(_WIN32) -/// @brief Microsoft-specific dllexport storage-class attribute -#define ZE_DLLEXPORT __declspec(dllexport) -#endif // defined(_WIN32) -#endif // ZE_DLLEXPORT - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_DLLEXPORT -#if __GNUC__ >= 4 -/// @brief GCC-specific dllexport storage-class attribute -#define ZE_DLLEXPORT __attribute__ ((visibility ("default"))) -#else -#define ZE_DLLEXPORT -#endif // __GNUC__ >= 4 -#endif // ZE_DLLEXPORT - -/////////////////////////////////////////////////////////////////////////////// -/// @brief compiler-independent type -typedef uint8_t ze_bool_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of a driver instance -typedef struct _ze_driver_handle_t *ze_driver_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's device object -typedef struct _ze_device_handle_t *ze_device_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's context object -typedef struct _ze_context_handle_t *ze_context_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's command queue object -typedef struct _ze_command_queue_handle_t *ze_command_queue_handle_t; - 
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's command list object -typedef struct _ze_command_list_handle_t *ze_command_list_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's fence object -typedef struct _ze_fence_handle_t *ze_fence_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's event pool object -typedef struct _ze_event_pool_handle_t *ze_event_pool_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's event object -typedef struct _ze_event_handle_t *ze_event_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's image object -typedef struct _ze_image_handle_t *ze_image_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's module object -typedef struct _ze_module_handle_t *ze_module_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of module's build log object -typedef struct _ze_module_build_log_handle_t *ze_module_build_log_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's kernel object -typedef struct _ze_kernel_handle_t *ze_kernel_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of driver's sampler object -typedef struct _ze_sampler_handle_t *ze_sampler_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Handle of physical memory object -typedef struct _ze_physical_mem_handle_t *ze_physical_mem_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_IPC_HANDLE_SIZE -/// @brief Maximum IPC handle size -#define ZE_MAX_IPC_HANDLE_SIZE 64 -#endif // ZE_MAX_IPC_HANDLE_SIZE - -/////////////////////////////////////////////////////////////////////////////// -/// @brief IPC handle to a memory allocation -typedef struct _ze_ipc_mem_handle_t -{ - char data[ZE_MAX_IPC_HANDLE_SIZE]; ///< [out] Opaque data representing an IPC handle - -} ze_ipc_mem_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief IPC handle to a event pool allocation -typedef struct _ze_ipc_event_pool_handle_t -{ - char data[ZE_MAX_IPC_HANDLE_SIZE]; ///< [out] Opaque data representing an IPC handle - -} ze_ipc_event_pool_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_BIT -/// @brief Generic macro for enumerator bit masks -#define ZE_BIT( _i ) ( 1 << _i ) -#endif // ZE_BIT - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Defines Return/Error codes -typedef enum _ze_result_t -{ - ZE_RESULT_SUCCESS = 0, ///< [Core] success - ZE_RESULT_NOT_READY = 1, ///< [Core] synchronization primitive not signaled - ZE_RESULT_ERROR_DEVICE_LOST = 0x70000001, ///< [Core] device hung, reset, was removed, or driver update occurred - ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY = 0x70000002,///< [Core] insufficient host memory to satisfy call - ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY = 0x70000003, ///< [Core] insufficient device memory to satisfy call - ZE_RESULT_ERROR_MODULE_BUILD_FAILURE = 
0x70000004, ///< [Core] error occurred when building module, see build log for details - ZE_RESULT_ERROR_MODULE_LINK_FAILURE = 0x70000005, ///< [Core] error occurred when linking modules, see build log for details - ZE_RESULT_ERROR_INSUFFICIENT_PERMISSIONS = 0x70010000, ///< [Sysman] access denied due to permission level - ZE_RESULT_ERROR_NOT_AVAILABLE = 0x70010001, ///< [Sysman] resource already in use and simultaneous access not allowed - ///< or resource was removed - ZE_RESULT_ERROR_DEPENDENCY_UNAVAILABLE = 0x70020000,///< [Tools] external required dependency is unavailable or missing - ZE_RESULT_ERROR_UNINITIALIZED = 0x78000001, ///< [Validation] driver is not initialized - ZE_RESULT_ERROR_UNSUPPORTED_VERSION = 0x78000002, ///< [Validation] generic error code for unsupported versions - ZE_RESULT_ERROR_UNSUPPORTED_FEATURE = 0x78000003, ///< [Validation] generic error code for unsupported features - ZE_RESULT_ERROR_INVALID_ARGUMENT = 0x78000004, ///< [Validation] generic error code for invalid arguments - ZE_RESULT_ERROR_INVALID_NULL_HANDLE = 0x78000005, ///< [Validation] handle argument is not valid - ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE = 0x78000006, ///< [Validation] object pointed to by handle still in-use by device - ZE_RESULT_ERROR_INVALID_NULL_POINTER = 0x78000007, ///< [Validation] pointer argument may not be nullptr - ZE_RESULT_ERROR_INVALID_SIZE = 0x78000008, ///< [Validation] size argument is invalid (e.g., must not be zero) - ZE_RESULT_ERROR_UNSUPPORTED_SIZE = 0x78000009, ///< [Validation] size argument is not supported by the device (e.g., too - ///< large) - ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT = 0x7800000a, ///< [Validation] alignment argument is not supported by the device (e.g., - ///< too small) - ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT = 0x7800000b,///< [Validation] synchronization object in invalid state - ZE_RESULT_ERROR_INVALID_ENUMERATION = 0x7800000c, ///< [Validation] enumerator argument is not valid - ZE_RESULT_ERROR_UNSUPPORTED_ENUMERATION = 0x7800000d, ///< [Validation] enumerator argument is not supported by the device - ZE_RESULT_ERROR_UNSUPPORTED_IMAGE_FORMAT = 0x7800000e, ///< [Validation] image format is not supported by the device - ZE_RESULT_ERROR_INVALID_NATIVE_BINARY = 0x7800000f, ///< [Validation] native binary is not supported by the device - ZE_RESULT_ERROR_INVALID_GLOBAL_NAME = 0x78000010, ///< [Validation] global variable is not found in the module - ZE_RESULT_ERROR_INVALID_KERNEL_NAME = 0x78000011, ///< [Validation] kernel name is not found in the module - ZE_RESULT_ERROR_INVALID_FUNCTION_NAME = 0x78000012, ///< [Validation] function name is not found in the module - ZE_RESULT_ERROR_INVALID_GROUP_SIZE_DIMENSION = 0x78000013, ///< [Validation] group size dimension is not valid for the kernel or - ///< device - ZE_RESULT_ERROR_INVALID_GLOBAL_WIDTH_DIMENSION = 0x78000014,///< [Validation] global width dimension is not valid for the kernel or - ///< device - ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_INDEX = 0x78000015, ///< [Validation] kernel argument index is not valid for kernel - ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_SIZE = 0x78000016, ///< [Validation] kernel argument size does not match kernel - ZE_RESULT_ERROR_INVALID_KERNEL_ATTRIBUTE_VALUE = 0x78000017,///< [Validation] value of kernel attribute is not valid for the kernel or - ///< device - ZE_RESULT_ERROR_INVALID_MODULE_UNLINKED = 0x78000018, ///< [Validation] module with imports needs to be linked before kernels can - ///< be created from it. 
- ZE_RESULT_ERROR_INVALID_COMMAND_LIST_TYPE = 0x78000019, ///< [Validation] command list type does not match command queue type - ZE_RESULT_ERROR_OVERLAPPING_REGIONS = 0x7800001a, ///< [Validation] copy operations do not support overlapping regions of - ///< memory - ZE_RESULT_ERROR_UNKNOWN = 0x7ffffffe, ///< [Core] unknown or internal error - ZE_RESULT_FORCE_UINT32 = 0x7fffffff - -} ze_result_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Defines structure types -typedef enum _ze_structure_type_t -{ - ZE_STRUCTURE_TYPE_DRIVER_PROPERTIES = 0x1, ///< ::ze_driver_properties_t - ZE_STRUCTURE_TYPE_DRIVER_IPC_PROPERTIES = 0x2, ///< ::ze_driver_ipc_properties_t - ZE_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x3, ///< ::ze_device_properties_t - ZE_STRUCTURE_TYPE_DEVICE_COMPUTE_PROPERTIES = 0x4, ///< ::ze_device_compute_properties_t - ZE_STRUCTURE_TYPE_DEVICE_MODULE_PROPERTIES = 0x5, ///< ::ze_device_module_properties_t - ZE_STRUCTURE_TYPE_COMMAND_QUEUE_GROUP_PROPERTIES = 0x6, ///< ::ze_command_queue_group_properties_t - ZE_STRUCTURE_TYPE_DEVICE_MEMORY_PROPERTIES = 0x7, ///< ::ze_device_memory_properties_t - ZE_STRUCTURE_TYPE_DEVICE_MEMORY_ACCESS_PROPERTIES = 0x8,///< ::ze_device_memory_access_properties_t - ZE_STRUCTURE_TYPE_DEVICE_CACHE_PROPERTIES = 0x9,///< ::ze_device_cache_properties_t - ZE_STRUCTURE_TYPE_DEVICE_IMAGE_PROPERTIES = 0xa,///< ::ze_device_image_properties_t - ZE_STRUCTURE_TYPE_DEVICE_P2P_PROPERTIES = 0xb, ///< ::ze_device_p2p_properties_t - ZE_STRUCTURE_TYPE_DEVICE_EXTERNAL_MEMORY_PROPERTIES = 0xc, ///< ::ze_device_external_memory_properties_t - ZE_STRUCTURE_TYPE_CONTEXT_DESC = 0xd, ///< ::ze_context_desc_t - ZE_STRUCTURE_TYPE_COMMAND_QUEUE_DESC = 0xe, ///< ::ze_command_queue_desc_t - ZE_STRUCTURE_TYPE_COMMAND_LIST_DESC = 0xf, ///< ::ze_command_list_desc_t - ZE_STRUCTURE_TYPE_EVENT_POOL_DESC = 0x10, ///< ::ze_event_pool_desc_t - ZE_STRUCTURE_TYPE_EVENT_DESC = 0x11, ///< ::ze_event_desc_t - ZE_STRUCTURE_TYPE_FENCE_DESC = 0x12, ///< ::ze_fence_desc_t - ZE_STRUCTURE_TYPE_IMAGE_DESC = 0x13, ///< ::ze_image_desc_t - ZE_STRUCTURE_TYPE_IMAGE_PROPERTIES = 0x14, ///< ::ze_image_properties_t - ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC = 0x15, ///< ::ze_device_mem_alloc_desc_t - ZE_STRUCTURE_TYPE_HOST_MEM_ALLOC_DESC = 0x16, ///< ::ze_host_mem_alloc_desc_t - ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES = 0x17, ///< ::ze_memory_allocation_properties_t - ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_EXPORT_DESC = 0x18, ///< ::ze_external_memory_export_desc_t - ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMPORT_FD = 0x19, ///< ::ze_external_memory_import_fd_t - ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_EXPORT_FD = 0x1a, ///< ::ze_external_memory_export_fd_t - ZE_STRUCTURE_TYPE_MODULE_DESC = 0x1b, ///< ::ze_module_desc_t - ZE_STRUCTURE_TYPE_MODULE_PROPERTIES = 0x1c, ///< ::ze_module_properties_t - ZE_STRUCTURE_TYPE_KERNEL_DESC = 0x1d, ///< ::ze_kernel_desc_t - ZE_STRUCTURE_TYPE_KERNEL_PROPERTIES = 0x1e, ///< ::ze_kernel_properties_t - ZE_STRUCTURE_TYPE_SAMPLER_DESC = 0x1f, ///< ::ze_sampler_desc_t - ZE_STRUCTURE_TYPE_PHYSICAL_MEM_DESC = 0x20, ///< ::ze_physical_mem_desc_t - ZE_STRUCTURE_TYPE_DEVICE_RAYTRACING_EXT_PROPERTIES = 0x00010001,///< ::ze_device_raytracing_ext_properties_t - ZE_STRUCTURE_TYPE_RAYTRACING_MEM_ALLOC_EXT_DESC = 0x10002, ///< ::ze_raytracing_mem_alloc_ext_desc_t - ZE_STRUCTURE_TYPE_FLOAT_ATOMIC_EXT_PROPERTIES = 0x10003,///< ::ze_float_atomic_ext_properties_t - ZE_STRUCTURE_TYPE_RELAXED_ALLOCATION_LIMITS_EXP_DESC = 0x00020001, ///< 
::ze_relaxed_allocation_limits_exp_desc_t - ZE_STRUCTURE_TYPE_MODULE_PROGRAM_EXP_DESC = 0x00020002, ///< ::ze_module_program_exp_desc_t - ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff - -} ze_structure_type_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief External memory type flags -typedef uint32_t ze_external_memory_type_flags_t; -typedef enum _ze_external_memory_type_flag_t -{ - ZE_EXTERNAL_MEMORY_TYPE_FLAG_OPAQUE_FD = ZE_BIT(0), ///< an opaque POSIX file descriptor handle - ZE_EXTERNAL_MEMORY_TYPE_FLAG_DMA_BUF = ZE_BIT(1), ///< a file descriptor handle for a Linux dma_buf - ZE_EXTERNAL_MEMORY_TYPE_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_external_memory_type_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Base for all properties types -typedef struct _ze_base_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - -} ze_base_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Base for all descriptor types -typedef struct _ze_base_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - -} ze_base_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forces driver to only report devices (and sub-devices) as specified by -/// values - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forces driver to report devices from lowest to highest PCI bus ID - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forces all shared allocations into device memory - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_ipc_mem_handle_t -typedef struct _ze_ipc_mem_handle_t ze_ipc_mem_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_ipc_event_pool_handle_t -typedef struct _ze_ipc_event_pool_handle_t ze_ipc_event_pool_handle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_base_properties_t -typedef struct _ze_base_properties_t ze_base_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_base_desc_t -typedef struct _ze_base_desc_t ze_base_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_driver_uuid_t -typedef struct _ze_driver_uuid_t ze_driver_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_driver_properties_t -typedef struct _ze_driver_properties_t ze_driver_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_driver_ipc_properties_t -typedef struct _ze_driver_ipc_properties_t ze_driver_ipc_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_driver_extension_properties_t -typedef struct _ze_driver_extension_properties_t ze_driver_extension_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief 
Forward-declare ze_device_uuid_t -typedef struct _ze_device_uuid_t ze_device_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_properties_t -typedef struct _ze_device_properties_t ze_device_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_thread_t -typedef struct _ze_device_thread_t ze_device_thread_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_compute_properties_t -typedef struct _ze_device_compute_properties_t ze_device_compute_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_native_kernel_uuid_t -typedef struct _ze_native_kernel_uuid_t ze_native_kernel_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_module_properties_t -typedef struct _ze_device_module_properties_t ze_device_module_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_command_queue_group_properties_t -typedef struct _ze_command_queue_group_properties_t ze_command_queue_group_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_memory_properties_t -typedef struct _ze_device_memory_properties_t ze_device_memory_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_memory_access_properties_t -typedef struct _ze_device_memory_access_properties_t ze_device_memory_access_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_cache_properties_t -typedef struct _ze_device_cache_properties_t ze_device_cache_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_image_properties_t -typedef struct _ze_device_image_properties_t ze_device_image_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_external_memory_properties_t -typedef struct _ze_device_external_memory_properties_t ze_device_external_memory_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_p2p_properties_t -typedef struct _ze_device_p2p_properties_t ze_device_p2p_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_context_desc_t -typedef struct _ze_context_desc_t ze_context_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_command_queue_desc_t -typedef struct _ze_command_queue_desc_t ze_command_queue_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_command_list_desc_t -typedef struct _ze_command_list_desc_t ze_command_list_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_copy_region_t -typedef struct _ze_copy_region_t ze_copy_region_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief 
Forward-declare ze_image_region_t -typedef struct _ze_image_region_t ze_image_region_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_event_pool_desc_t -typedef struct _ze_event_pool_desc_t ze_event_pool_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_event_desc_t -typedef struct _ze_event_desc_t ze_event_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_kernel_timestamp_data_t -typedef struct _ze_kernel_timestamp_data_t ze_kernel_timestamp_data_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_kernel_timestamp_result_t -typedef struct _ze_kernel_timestamp_result_t ze_kernel_timestamp_result_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_fence_desc_t -typedef struct _ze_fence_desc_t ze_fence_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_image_format_t -typedef struct _ze_image_format_t ze_image_format_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_image_desc_t -typedef struct _ze_image_desc_t ze_image_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_image_properties_t -typedef struct _ze_image_properties_t ze_image_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_mem_alloc_desc_t -typedef struct _ze_device_mem_alloc_desc_t ze_device_mem_alloc_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_host_mem_alloc_desc_t -typedef struct _ze_host_mem_alloc_desc_t ze_host_mem_alloc_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_memory_allocation_properties_t -typedef struct _ze_memory_allocation_properties_t ze_memory_allocation_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_external_memory_export_desc_t -typedef struct _ze_external_memory_export_desc_t ze_external_memory_export_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_external_memory_import_fd_t -typedef struct _ze_external_memory_import_fd_t ze_external_memory_import_fd_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_external_memory_export_fd_t -typedef struct _ze_external_memory_export_fd_t ze_external_memory_export_fd_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_module_constants_t -typedef struct _ze_module_constants_t ze_module_constants_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_module_desc_t -typedef struct _ze_module_desc_t ze_module_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_module_properties_t -typedef struct _ze_module_properties_t ze_module_properties_t; - 
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_kernel_desc_t -typedef struct _ze_kernel_desc_t ze_kernel_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_kernel_uuid_t -typedef struct _ze_kernel_uuid_t ze_kernel_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_kernel_properties_t -typedef struct _ze_kernel_properties_t ze_kernel_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_group_count_t -typedef struct _ze_group_count_t ze_group_count_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_module_program_exp_desc_t -typedef struct _ze_module_program_exp_desc_t ze_module_program_exp_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_device_raytracing_ext_properties_t -typedef struct _ze_device_raytracing_ext_properties_t ze_device_raytracing_ext_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_raytracing_mem_alloc_ext_desc_t -typedef struct _ze_raytracing_mem_alloc_ext_desc_t ze_raytracing_mem_alloc_ext_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_sampler_desc_t -typedef struct _ze_sampler_desc_t ze_sampler_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_physical_mem_desc_t -typedef struct _ze_physical_mem_desc_t ze_physical_mem_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_float_atomic_ext_properties_t -typedef struct _ze_float_atomic_ext_properties_t ze_float_atomic_ext_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Forward-declare ze_relaxed_allocation_limits_exp_desc_t -typedef struct _ze_relaxed_allocation_limits_exp_desc_t ze_relaxed_allocation_limits_exp_desc_t; - - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs -#if !defined(__GNUC__) -#pragma region driver -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported initialization flags -typedef uint32_t ze_init_flags_t; -typedef enum _ze_init_flag_t -{ - ZE_INIT_FLAG_GPU_ONLY = ZE_BIT(0), ///< only initialize GPU drivers - ZE_INIT_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_init_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Initialize the 'oneAPI' driver(s) -/// -/// @details -/// - The application must call this function before calling any other -/// function. -/// - If this function is not called then all other functions will return -/// ::ZE_RESULT_ERROR_UNINITIALIZED. -/// - Only one instance of each driver will be initialized per process. -/// - The application may call this function multiple times with different -/// flags or environment variables enabled. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe for scenarios -/// where multiple libraries may initialize the driver(s) simultaneously. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < flags` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeInit( - ze_init_flags_t flags ///< [in] initialization flags. - ///< must be 0 (default) or a combination of ::ze_init_flag_t. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves driver instances -/// -/// @details -/// - A driver represents a collection of physical devices. -/// - Multiple calls to this function will return identical driver handles, -/// in the same order. -/// - The application may pass nullptr for pDrivers when only querying the -/// number of drivers. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - clGetPlatformIDs -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDriverGet( - uint32_t* pCount, ///< [in,out] pointer to the number of driver instances. - ///< if count is zero, then the loader shall update the value with the - ///< total number of drivers available. - ///< if count is greater than the number of drivers available, then the - ///< loader shall update the value with the correct number of drivers available. - ze_driver_handle_t* phDrivers ///< [in,out][optional][range(0, *pCount)] array of driver instance handles. - ///< if count is less than the number of drivers available, then the loader - ///< shall only retrieve that number of drivers. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported API versions -/// -/// @details -/// - API versions contain major and minor attributes, use -/// ::ZE_MAJOR_VERSION and ::ZE_MINOR_VERSION -typedef enum _ze_api_version_t -{ - ZE_API_VERSION_1_0 = ZE_MAKE_VERSION( 1, 0 ), ///< version 1.0 - ZE_API_VERSION_1_1 = ZE_MAKE_VERSION( 1, 1 ), ///< version 1.1 - ZE_API_VERSION_CURRENT = ZE_MAKE_VERSION( 1, 1 ), ///< latest known version - ZE_API_VERSION_FORCE_UINT32 = 0x7fffffff - -} ze_api_version_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Returns the API version supported by the specified driver -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == version` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDriverGetApiVersion( - ze_driver_handle_t hDriver, ///< [in] handle of the driver instance - ze_api_version_t* version ///< [out] api version - ); - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_DRIVER_UUID_SIZE -/// @brief Maximum driver universal unique id (UUID) size in bytes -#define ZE_MAX_DRIVER_UUID_SIZE 16 -#endif // ZE_MAX_DRIVER_UUID_SIZE - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Driver universal unique id (UUID) -typedef struct _ze_driver_uuid_t -{ - uint8_t id[ZE_MAX_DRIVER_UUID_SIZE]; ///< [out] opaque data representing a driver UUID - -} ze_driver_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Driver properties queried using ::zeDriverGetProperties -typedef struct _ze_driver_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_driver_uuid_t uuid; ///< [out] universal unique identifier. - uint32_t driverVersion; ///< [out] driver version - ///< The driver version is a non-zero, monotonically increasing value where - ///< higher values always indicate a more recent version. - -} ze_driver_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves properties of the driver. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **clGetPlatformInfo** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pDriverProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDriverGetProperties( - ze_driver_handle_t hDriver, ///< [in] handle of the driver instance - ze_driver_properties_t* pDriverProperties ///< [in,out] query result for driver properties - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported IPC property flags -typedef uint32_t ze_ipc_property_flags_t; -typedef enum _ze_ipc_property_flag_t -{ - ZE_IPC_PROPERTY_FLAG_MEMORY = ZE_BIT(0), ///< Supports passing memory allocations between processes. See - ///< ::zeMemGetIpcHandle. - ZE_IPC_PROPERTY_FLAG_EVENT_POOL = ZE_BIT(1), ///< Supports passing event pools between processes. See - ///< ::zeEventPoolGetIpcHandle. 
- ZE_IPC_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_ipc_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief IPC properties queried using ::zeDriverGetIpcProperties -typedef struct _ze_driver_ipc_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_ipc_property_flags_t flags; ///< [out] 0 (none) or a valid combination of ::ze_ipc_property_flag_t - -} ze_driver_ipc_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves IPC attributes of the driver -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pIpcProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDriverGetIpcProperties( - ze_driver_handle_t hDriver, ///< [in] handle of the driver instance - ze_driver_ipc_properties_t* pIpcProperties ///< [in,out] query result for IPC properties - ); - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_EXTENSION_NAME -/// @brief Maximum extension name string size -#define ZE_MAX_EXTENSION_NAME 256 -#endif // ZE_MAX_EXTENSION_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Extension properties queried using ::zeDriverGetExtensionProperties -typedef struct _ze_driver_extension_properties_t -{ - char name[ZE_MAX_EXTENSION_NAME]; ///< [out] extension name - uint32_t version; ///< [out] extension version using ::ZE_MAKE_VERSION - -} ze_driver_extension_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves extension properties -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **vkEnumerateInstanceExtensionProperties** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDriverGetExtensionProperties( - ze_driver_handle_t hDriver, ///< [in] handle of the driver instance - uint32_t* pCount, ///< [in,out] pointer to the number of extension properties. - ///< if count is zero, then the driver shall update the value with the - ///< total number of extension properties available. - ///< if count is greater than the number of extension properties available, - ///< then the driver shall update the value with the correct number of - ///< extension properties available. - ze_driver_extension_properties_t* pExtensionProperties ///< [in,out][optional][range(0, *pCount)] array of query results for - ///< extension properties. - ///< if count is less than the number of extension properties available, - ///< then driver shall only retrieve that number of extension properties. 
- ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves function pointer for vendor-specific or experimental -/// extensions -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == name` -/// + `nullptr == ppFunctionAddress` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDriverGetExtensionFunctionAddress( - ze_driver_handle_t hDriver, ///< [in] handle of the driver instance - const char* name, ///< [in] extension name - void** ppFunctionAddress ///< [out] pointer to function pointer - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Device -#if !defined(__GNUC__) -#pragma region device -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves devices within a driver -/// -/// @details -/// - Multiple calls to this function will return identical device handles, -/// in the same order. -/// - The number and order of handles returned from this function is -/// affected by the ::ZE_AFFINITY_MASK and ::ZE_ENABLE_PCI_ID_DEVICE_ORDER -/// environment variables. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGet( - ze_driver_handle_t hDriver, ///< [in] handle of the driver instance - uint32_t* pCount, ///< [in,out] pointer to the number of devices. - ///< if count is zero, then the driver shall update the value with the - ///< total number of devices available. - ///< if count is greater than the number of devices available, then the - ///< driver shall update the value with the correct number of devices available. - ze_device_handle_t* phDevices ///< [in,out][optional][range(0, *pCount)] array of handle of devices. - ///< if count is less than the number of devices available, then driver - ///< shall only retrieve that number of devices. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves a sub-device from a device -/// -/// @details -/// - Multiple calls to this function will return identical device handles, -/// in the same order. -/// - The number of handles returned from this function is affected by the -/// ::ZE_AFFINITY_MASK environment variable. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
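zeDeviceGet follows the same two-call pattern, applied per driver. A small helper sketch, assuming a driver handle obtained as in the previous example (the helper name and ownership convention are illustrative):

#include <level_zero/ze_api.h>
#include <stdlib.h>

/* Returns a malloc'd array of device handles (caller frees) and stores the
 * device count in *count; returns NULL if the driver exposes no devices. */
static ze_device_handle_t *enumerate_devices(ze_driver_handle_t driver,
                                             uint32_t *count) {
    *count = 0;
    if (zeDeviceGet(driver, count, NULL) != ZE_RESULT_SUCCESS || *count == 0)
        return NULL;
    ze_device_handle_t *devices = malloc(*count * sizeof(*devices));
    zeDeviceGet(driver, count, devices);
    return devices;
}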
-/// -/// @remarks -/// _Analogues_ -/// - clCreateSubDevices -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetSubDevices( - ze_device_handle_t hDevice, ///< [in] handle of the device object - uint32_t* pCount, ///< [in,out] pointer to the number of sub-devices. - ///< if count is zero, then the driver shall update the value with the - ///< total number of sub-devices available. - ///< if count is greater than the number of sub-devices available, then the - ///< driver shall update the value with the correct number of sub-devices available. - ze_device_handle_t* phSubdevices ///< [in,out][optional][range(0, *pCount)] array of handle of sub-devices. - ///< if count is less than the number of sub-devices available, then driver - ///< shall only retrieve that number of sub-devices. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported device types -typedef enum _ze_device_type_t -{ - ZE_DEVICE_TYPE_GPU = 1, ///< Graphics Processing Unit - ZE_DEVICE_TYPE_CPU = 2, ///< Central Processing Unit - ZE_DEVICE_TYPE_FPGA = 3, ///< Field Programmable Gate Array - ZE_DEVICE_TYPE_MCA = 4, ///< Memory Copy Accelerator - ZE_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff - -} ze_device_type_t; - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_DEVICE_UUID_SIZE -/// @brief Maximum device universal unique id (UUID) size in bytes -#define ZE_MAX_DEVICE_UUID_SIZE 16 -#endif // ZE_MAX_DEVICE_UUID_SIZE - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device universal unique id (UUID) -typedef struct _ze_device_uuid_t -{ - uint8_t id[ZE_MAX_DEVICE_UUID_SIZE]; ///< [out] opaque data representing a device UUID - -} ze_device_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_DEVICE_NAME -/// @brief Maximum device name string size -#define ZE_MAX_DEVICE_NAME 256 -#endif // ZE_MAX_DEVICE_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported device property flags -typedef uint32_t ze_device_property_flags_t; -typedef enum _ze_device_property_flag_t -{ - ZE_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0), ///< Device is integrated with the Host. - ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1), ///< Device handle used for query represents a sub-device. - ZE_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2), ///< Device supports error correction memory access. - ZE_DEVICE_PROPERTY_FLAG_ONDEMANDPAGING = ZE_BIT(3), ///< Device supports on-demand page-faulting. 
- ZE_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device properties queried using ::zeDeviceGetProperties -typedef struct _ze_device_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_device_type_t type; ///< [out] generic device type - uint32_t vendorId; ///< [out] vendor id from PCI configuration - uint32_t deviceId; ///< [out] device id from PCI configuration - ze_device_property_flags_t flags; ///< [out] 0 (none) or a valid combination of ::ze_device_property_flag_t - uint32_t subdeviceId; ///< [out] sub-device id. Only valid if ::ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE - ///< is set. - uint32_t coreClockRate; ///< [out] Clock rate for device core. - uint64_t maxMemAllocSize; ///< [out] Maximum memory allocation size. - uint32_t maxHardwareContexts; ///< [out] Maximum number of logical hardware contexts. - uint32_t maxCommandQueuePriority; ///< [out] Maximum priority for command queues. Higher value is higher - ///< priority. - uint32_t numThreadsPerEU; ///< [out] Number of threads per EU. - uint32_t physicalEUSimdWidth; ///< [out] The physical EU simd width. - uint32_t numEUsPerSubslice; ///< [out] Number of EUs per sub-slice. - uint32_t numSubslicesPerSlice; ///< [out] Number of sub-slices per slice. - uint32_t numSlices; ///< [out] Number of slices. - uint64_t timerResolution; ///< [out] Returns the resolution of device timer in cycles per second used - ///< for profiling, timestamps, etc. - uint32_t timestampValidBits; ///< [out] Returns the number of valid bits in the timestamp value. - uint32_t kernelTimestampValidBits; ///< [out] Returns the number of valid bits in the kernel timestamp values - ze_device_uuid_t uuid; ///< [out] universal unique identifier. Note: Subdevices will have their - ///< own uuid. - char name[ZE_MAX_DEVICE_NAME]; ///< [out] Device name - -} ze_device_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device thread identifier. -typedef struct _ze_device_thread_t -{ - uint32_t slice; ///< [in,out] the slice number. - ///< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numSlices. - uint32_t subslice; ///< [in,out] the sub-slice number within its slice. - ///< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numSubslicesPerSlice. - uint32_t eu; ///< [in,out] the EU number within its sub-slice. - ///< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numEUsPerSubslice. - uint32_t thread; ///< [in,out] the thread number within its EU. - ///< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numThreadsPerEU. - -} ze_device_thread_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves properties of the device. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
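Because stype and pNext are marked [in], the properties structure is typically zero-initialized and tagged before the query. A sketch against the zeDeviceGetProperties entry point documented here, assuming a valid device handle from the enumeration above:

#include <level_zero/ze_api.h>
#include <stdio.h>
#include <string.h>

static void print_device_summary(ze_device_handle_t device) {
    ze_device_properties_t props;
    memset(&props, 0, sizeof(props));                   /* pNext stays NULL */
    props.stype = ZE_STRUCTURE_TYPE_DEVICE_PROPERTIES;

    if (zeDeviceGetProperties(device, &props) == ZE_RESULT_SUCCESS)
        printf("%s (%s)\n", props.name,
               props.type == ZE_DEVICE_TYPE_GPU ? "GPU" : "non-GPU");
}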
-/// -/// @remarks -/// _Analogues_ -/// - clGetDeviceInfo -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pDeviceProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_device_properties_t* pDeviceProperties ///< [in,out] query result for device properties - ); - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_SUBGROUPSIZE_COUNT -/// @brief Maximum number of subgroup sizes supported. -#define ZE_SUBGROUPSIZE_COUNT 8 -#endif // ZE_SUBGROUPSIZE_COUNT - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device compute properties queried using ::zeDeviceGetComputeProperties -typedef struct _ze_device_compute_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - uint32_t maxTotalGroupSize; ///< [out] Maximum items per compute group. (groupSizeX * groupSizeY * - ///< groupSizeZ) <= maxTotalGroupSize - uint32_t maxGroupSizeX; ///< [out] Maximum items for X dimension in group - uint32_t maxGroupSizeY; ///< [out] Maximum items for Y dimension in group - uint32_t maxGroupSizeZ; ///< [out] Maximum items for Z dimension in group - uint32_t maxGroupCountX; ///< [out] Maximum groups that can be launched for x dimension - uint32_t maxGroupCountY; ///< [out] Maximum groups that can be launched for y dimension - uint32_t maxGroupCountZ; ///< [out] Maximum groups that can be launched for z dimension - uint32_t maxSharedLocalMemory; ///< [out] Maximum shared local memory per group. - uint32_t numSubGroupSizes; ///< [out] Number of subgroup sizes supported. This indicates number of - ///< entries in subGroupSizes. - uint32_t subGroupSizes[ZE_SUBGROUPSIZE_COUNT]; ///< [out] Size group sizes supported. - -} ze_device_compute_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves compute properties of the device. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
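The compute-properties structure is queried the same way; the work-group limits and the subGroupSizes array it returns are what a dispatcher would validate launch dimensions against. A sketch assuming a valid device handle:

#include <level_zero/ze_api.h>
#include <stdio.h>
#include <string.h>

static void print_group_limits(ze_device_handle_t device) {
    ze_device_compute_properties_t compute;
    memset(&compute, 0, sizeof(compute));
    compute.stype = ZE_STRUCTURE_TYPE_DEVICE_COMPUTE_PROPERTIES;

    if (zeDeviceGetComputeProperties(device, &compute) != ZE_RESULT_SUCCESS)
        return;

    printf("max items per group: %u (%u x %u x %u)\n",
           compute.maxTotalGroupSize, compute.maxGroupSizeX,
           compute.maxGroupSizeY, compute.maxGroupSizeZ);
    for (uint32_t i = 0; i < compute.numSubGroupSizes; ++i)
        printf("  sub-group size: %u\n", compute.subGroupSizes[i]);
}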
-/// -/// @remarks -/// _Analogues_ -/// - clGetDeviceInfo -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pComputeProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetComputeProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_device_compute_properties_t* pComputeProperties ///< [in,out] query result for compute properties - ); - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_NATIVE_KERNEL_UUID_SIZE -/// @brief Maximum native kernel universal unique id (UUID) size in bytes -#define ZE_MAX_NATIVE_KERNEL_UUID_SIZE 16 -#endif // ZE_MAX_NATIVE_KERNEL_UUID_SIZE - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Native kernel universal unique id (UUID) -typedef struct _ze_native_kernel_uuid_t -{ - uint8_t id[ZE_MAX_NATIVE_KERNEL_UUID_SIZE]; ///< [out] opaque data representing a native kernel UUID - -} ze_native_kernel_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported device module flags -typedef uint32_t ze_device_module_flags_t; -typedef enum _ze_device_module_flag_t -{ - ZE_DEVICE_MODULE_FLAG_FP16 = ZE_BIT(0), ///< Device supports 16-bit floating-point operations - ZE_DEVICE_MODULE_FLAG_FP64 = ZE_BIT(1), ///< Device supports 64-bit floating-point operations - ZE_DEVICE_MODULE_FLAG_INT64_ATOMICS = ZE_BIT(2),///< Device supports 64-bit atomic operations - ZE_DEVICE_MODULE_FLAG_DP4A = ZE_BIT(3), ///< Device supports four component dot product and accumulate operations - ZE_DEVICE_MODULE_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_module_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported floating-Point capability flags -typedef uint32_t ze_device_fp_flags_t; -typedef enum _ze_device_fp_flag_t -{ - ZE_DEVICE_FP_FLAG_DENORM = ZE_BIT(0), ///< Supports denorms - ZE_DEVICE_FP_FLAG_INF_NAN = ZE_BIT(1), ///< Supports INF and quiet NaNs - ZE_DEVICE_FP_FLAG_ROUND_TO_NEAREST = ZE_BIT(2), ///< Supports rounding to nearest even rounding mode - ZE_DEVICE_FP_FLAG_ROUND_TO_ZERO = ZE_BIT(3), ///< Supports rounding to zero. - ZE_DEVICE_FP_FLAG_ROUND_TO_INF = ZE_BIT(4), ///< Supports rounding to both positive and negative INF. - ZE_DEVICE_FP_FLAG_FMA = ZE_BIT(5), ///< Supports IEEE754-2008 fused multiply-add. - ZE_DEVICE_FP_FLAG_ROUNDED_DIVIDE_SQRT = ZE_BIT(6), ///< Supports rounding as defined by IEEE754 for divide and sqrt - ///< operations. - ZE_DEVICE_FP_FLAG_SOFT_FLOAT = ZE_BIT(7), ///< Uses software implementation for basic floating-point operations. - ZE_DEVICE_FP_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_fp_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device module properties queried using ::zeDeviceGetModuleProperties -typedef struct _ze_device_module_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - uint32_t spirvVersionSupported; ///< [out] Maximum supported SPIR-V version. - ///< Returns zero if SPIR-V is not supported. - ///< Contains major and minor attributes, use ::ZE_MAJOR_VERSION and ::ZE_MINOR_VERSION. 
- ze_device_module_flags_t flags; ///< [out] 0 or a valid combination of ::ze_device_module_flag_t - ze_device_fp_flags_t fp16flags; ///< [out] Capabilities for half-precision floating-point operations. - ///< returns 0 (if ::ZE_DEVICE_MODULE_FLAG_FP16 is not set) or a - ///< combination of ::ze_device_fp_flag_t. - ze_device_fp_flags_t fp32flags; ///< [out] Capabilities for single-precision floating-point operations. - ///< returns a combination of ::ze_device_fp_flag_t. - ze_device_fp_flags_t fp64flags; ///< [out] Capabilities for double-precision floating-point operations. - ///< returns 0 (if ::ZE_DEVICE_MODULE_FLAG_FP64 is not set) or a - ///< combination of ::ze_device_fp_flag_t. - uint32_t maxArgumentsSize; ///< [out] Maximum kernel argument size that is supported. - uint32_t printfBufferSize; ///< [out] Maximum size of internal buffer that holds output of printf - ///< calls from kernel. - ze_native_kernel_uuid_t nativeKernelSupported; ///< [out] Compatibility UUID of supported native kernel. - ///< UUID may or may not be the same across driver release, devices, or - ///< operating systems. - ///< Application is responsible for ensuring UUID matches before creating - ///< module using - ///< previously created native kernel. - -} ze_device_module_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves module properties of the device -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pModuleProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetModuleProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_device_module_properties_t* pModuleProperties///< [in,out] query result for module properties - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported command queue group property flags -typedef uint32_t ze_command_queue_group_property_flags_t; -typedef enum _ze_command_queue_group_property_flag_t -{ - ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_COMPUTE = ZE_BIT(0), ///< Command queue group supports enqueing compute commands. - ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_COPY = ZE_BIT(1), ///< Command queue group supports enqueing copy commands. - ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_COOPERATIVE_KERNELS = ZE_BIT(2), ///< Command queue group supports cooperative kernels. - ///< See ::zeCommandListAppendLaunchCooperativeKernel for more details. - ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_METRICS = ZE_BIT(3), ///< Command queue groups supports metric queries. 
- ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_command_queue_group_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Command queue group properties queried using -/// ::zeDeviceGetCommandQueueGroupProperties -typedef struct _ze_command_queue_group_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_command_queue_group_property_flags_t flags; ///< [out] 0 (none) or a valid combination of - ///< ::ze_command_queue_group_property_flag_t - size_t maxMemoryFillPatternSize; ///< [out] maximum `pattern_size` supported by command queue group. - ///< See ::zeCommandListAppendMemoryFill for more details. - uint32_t numQueues; ///< [out] the number of physical engines within the group. - -} ze_command_queue_group_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves command queue group properties of the device. -/// -/// @details -/// - Properties are reported for each physical command queue type supported -/// by the device. -/// - Multiple calls to this function will return properties in the same -/// order. -/// - The order in which the properties are returned defines the command -/// queue group's ordinal. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **vkGetPhysicalDeviceQueueFamilyProperties** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetCommandQueueGroupProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - uint32_t* pCount, ///< [in,out] pointer to the number of command queue group properties. - ///< if count is zero, then the driver shall update the value with the - ///< total number of command queue group properties available. - ///< if count is greater than the number of command queue group properties - ///< available, then the driver shall update the value with the correct - ///< number of command queue group properties available. - ze_command_queue_group_properties_t* pCommandQueueGroupProperties ///< [in,out][optional][range(0, *pCount)] array of query results for - ///< command queue group properties. - ///< if count is less than the number of command queue group properties - ///< available, then driver shall only retrieve that number of command - ///< queue group properties. 
- ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported device memory property flags -typedef uint32_t ze_device_memory_property_flags_t; -typedef enum _ze_device_memory_property_flag_t -{ - ZE_DEVICE_MEMORY_PROPERTY_FLAG_TBD = ZE_BIT(0), ///< reserved for future use - ZE_DEVICE_MEMORY_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_memory_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device local memory properties queried using -/// ::zeDeviceGetMemoryProperties -typedef struct _ze_device_memory_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_device_memory_property_flags_t flags; ///< [out] 0 (none) or a valid combination of - ///< ::ze_device_memory_property_flag_t - uint32_t maxClockRate; ///< [out] Maximum clock rate for device memory. - uint32_t maxBusWidth; ///< [out] Maximum bus width between device and memory. - uint64_t totalSize; ///< [out] Total memory size in bytes that is available to the device. - char name[ZE_MAX_DEVICE_NAME]; ///< [out] Memory name - -} ze_device_memory_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves local memory properties of the device. -/// -/// @details -/// - Properties are reported for each physical memory type supported by the -/// device. -/// - Multiple calls to this function will return properties in the same -/// order. -/// - The order in which the properties are returned defines the device's -/// local memory ordinal. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - clGetDeviceInfo -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetMemoryProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - uint32_t* pCount, ///< [in,out] pointer to the number of memory properties. - ///< if count is zero, then the driver shall update the value with the - ///< total number of memory properties available. - ///< if count is greater than the number of memory properties available, - ///< then the driver shall update the value with the correct number of - ///< memory properties available. - ze_device_memory_properties_t* pMemProperties ///< [in,out][optional][range(0, *pCount)] array of query results for - ///< memory properties. - ///< if count is less than the number of memory properties available, then - ///< driver shall only retrieve that number of memory properties. 
- ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Memory access capability flags -/// -/// @details -/// - Supported access capabilities for different types of memory -/// allocations -typedef uint32_t ze_memory_access_cap_flags_t; -typedef enum _ze_memory_access_cap_flag_t -{ - ZE_MEMORY_ACCESS_CAP_FLAG_RW = ZE_BIT(0), ///< Supports load/store access - ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC = ZE_BIT(1), ///< Supports atomic access - ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT = ZE_BIT(2), ///< Supports concurrent access - ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT_ATOMIC = ZE_BIT(3),///< Supports concurrent atomic access - ZE_MEMORY_ACCESS_CAP_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_memory_access_cap_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device memory access properties queried using -/// ::zeDeviceGetMemoryAccessProperties -typedef struct _ze_device_memory_access_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_memory_access_cap_flags_t hostAllocCapabilities; ///< [out] host memory capabilities. - ///< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flag_t. - ze_memory_access_cap_flags_t deviceAllocCapabilities; ///< [out] device memory capabilities. - ///< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flag_t. - ze_memory_access_cap_flags_t sharedSingleDeviceAllocCapabilities; ///< [out] shared, single-device memory capabilities. - ///< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flag_t. - ze_memory_access_cap_flags_t sharedCrossDeviceAllocCapabilities;///< [out] shared, cross-device memory capabilities. - ///< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flag_t. - ze_memory_access_cap_flags_t sharedSystemAllocCapabilities; ///< [out] shared, system memory capabilities. - ///< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flag_t. - -} ze_device_memory_access_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves memory access properties of the device. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - clGetDeviceInfo -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pMemAccessProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetMemoryAccessProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_device_memory_access_properties_t* pMemAccessProperties ///< [in,out] query result for memory access properties - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported cache control property flags -typedef uint32_t ze_device_cache_property_flags_t; -typedef enum _ze_device_cache_property_flag_t -{ - ZE_DEVICE_CACHE_PROPERTY_FLAG_USER_CONTROL = ZE_BIT(0), ///< Device support User Cache Control (i.e. 
SLM section vs Generic Cache) - ZE_DEVICE_CACHE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_cache_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device cache properties queried using ::zeDeviceGetCacheProperties -typedef struct _ze_device_cache_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_device_cache_property_flags_t flags; ///< [out] 0 (none) or a valid combination of - ///< ::ze_device_cache_property_flag_t - size_t cacheSize; ///< [out] Per-cache size, in bytes - -} ze_device_cache_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves cache properties of the device -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - clGetDeviceInfo -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetCacheProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - uint32_t* pCount, ///< [in,out] pointer to the number of cache properties. - ///< if count is zero, then the driver shall update the value with the - ///< total number of cache properties available. - ///< if count is greater than the number of cache properties available, - ///< then the driver shall update the value with the correct number of - ///< cache properties available. - ze_device_cache_properties_t* pCacheProperties ///< [in,out][optional][range(0, *pCount)] array of query results for cache properties. - ///< if count is less than the number of cache properties available, then - ///< driver shall only retrieve that number of cache properties. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device image properties queried using ::zeDeviceGetImageProperties -typedef struct _ze_device_image_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - uint32_t maxImageDims1D; ///< [out] Maximum image dimensions for 1D resources. if 0, then 1D images - ///< are unsupported. - uint32_t maxImageDims2D; ///< [out] Maximum image dimensions for 2D resources. if 0, then 2D images - ///< are unsupported. - uint32_t maxImageDims3D; ///< [out] Maximum image dimensions for 3D resources. if 0, then 3D images - ///< are unsupported. - uint64_t maxImageBufferSize; ///< [out] Maximum image buffer size in bytes. if 0, then buffer images are - ///< unsupported. - uint32_t maxImageArraySlices; ///< [out] Maximum image array slices. if 0, then image arrays are - ///< unsupported. - uint32_t maxSamplers; ///< [out] Max samplers that can be used in kernel. if 0, then sampling is - ///< unsupported. - uint32_t maxReadImageArgs; ///< [out] Returns the maximum number of simultaneous image objects that - ///< can be read from by a kernel. if 0, then reading images is - ///< unsupported. - uint32_t maxWriteImageArgs; ///< [out] Returns the maximum number of simultaneous image objects that - ///< can be written to by a kernel. 
if 0, then writing images is - ///< unsupported. - -} ze_device_image_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves image properties of the device -/// -/// @details -/// - See ::zeImageGetProperties for format-specific capabilities. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pImageProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetImageProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_device_image_properties_t* pImageProperties ///< [in,out] query result for image properties - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device external memory import and export properties -typedef struct _ze_device_external_memory_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_external_memory_type_flags_t memoryAllocationImportTypes;///< [out] Supported external memory import types for memory allocations. - ze_external_memory_type_flags_t memoryAllocationExportTypes;///< [out] Supported external memory export types for memory allocations. - ze_external_memory_type_flags_t imageImportTypes; ///< [out] Supported external memory import types for images. - ze_external_memory_type_flags_t imageExportTypes; ///< [out] Supported external memory export types for images. - -} ze_device_external_memory_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves external memory import and export of the device -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pExternalMemoryProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetExternalMemoryProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_device_external_memory_properties_t* pExternalMemoryProperties ///< [in,out] query result for external memory properties - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported device peer-to-peer property flags -typedef uint32_t ze_device_p2p_property_flags_t; -typedef enum _ze_device_p2p_property_flag_t -{ - ZE_DEVICE_P2P_PROPERTY_FLAG_ACCESS = ZE_BIT(0), ///< Device supports access between peer devices. - ZE_DEVICE_P2P_PROPERTY_FLAG_ATOMICS = ZE_BIT(1),///< Device supports atomics between peer devices. 
- ZE_DEVICE_P2P_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_p2p_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device peer-to-peer properties queried using -/// ::zeDeviceGetP2PProperties -typedef struct _ze_device_p2p_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_device_p2p_property_flags_t flags; ///< [out] 0 (none) or a valid combination of - ///< ::ze_device_p2p_property_flag_t - -} ze_device_p2p_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves peer-to-peer properties between one device and a peer -/// devices -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// + `nullptr == hPeerDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pP2PProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetP2PProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device performing the access - ze_device_handle_t hPeerDevice, ///< [in] handle of the peer device with the allocation - ze_device_p2p_properties_t* pP2PProperties ///< [in,out] Peer-to-Peer properties between source and peer device - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Queries if one device can directly access peer device allocations -/// -/// @details -/// - Any device can access any other device within a node through a -/// scale-up fabric. -/// - The following are conditions for CanAccessPeer query. -/// + If both device and peer device are the same then return true. -/// + If both sub-device and peer sub-device are the same then return -/// true. -/// + If both are sub-devices and share the same parent device then -/// return true. -/// + If both device and remote device are connected by a direct or -/// indirect scale-up fabric or over PCIe (same root complex or shared -/// PCIe switch) then true. -/// + If both sub-device and remote parent device (and vice-versa) are -/// connected by a direct or indirect scale-up fabric or over PCIe -/// (same root complex or shared PCIe switch) then true. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// + `nullptr == hPeerDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == value` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceCanAccessPeer( - ze_device_handle_t hDevice, ///< [in] handle of the device performing the access - ze_device_handle_t hPeerDevice, ///< [in] handle of the peer device with the allocation - ze_bool_t* value ///< [out] returned access capability - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Returns current status of the device. -/// -/// @details -/// - Once a device is reset, this call will update the OS handle attached -/// to the device handle. 
-/// - The application may call this function from simultaneous threads with -/// the same device handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_SUCCESS -/// + Device is available for use. -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// + Device is lost; must be reset for use. -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetStatus( - ze_device_handle_t hDevice ///< [in] handle of the device - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Returns synchronized Host and device global timestamps. -/// -/// @details -/// - The application may call this function from simultaneous threads with -/// the same device handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == hostTimestamp` -/// + `nullptr == deviceTimestamp` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeDeviceGetGlobalTimestamps( - ze_device_handle_t hDevice, ///< [in] handle of the device - uint64_t* hostTimestamp, ///< [out] value of the Host's global timestamp that correlates with the - ///< Device's global timestamp value - uint64_t* deviceTimestamp ///< [out] value of the Device's global timestamp that correlates with the - ///< Host's global timestamp value - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Context -#if !defined(__GNUC__) -#pragma region context -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported context creation flags -typedef uint32_t ze_context_flags_t; -typedef enum _ze_context_flag_t -{ - ZE_CONTEXT_FLAG_TBD = ZE_BIT(0), ///< reserved for future use - ZE_CONTEXT_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_context_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Context descriptor -typedef struct _ze_context_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_context_flags_t flags; ///< [in] creation flags. - ///< must be 0 (default) or a valid combination of ::ze_context_flag_t; - ///< default behavior may use implicit driver-based heuristics. - -} ze_context_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a context for the driver. -/// -/// @details -/// - The application must only use the context for the driver which was -/// provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. 
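Context creation takes a descriptor whose flags default to 0, so a zero-initialized ze_context_desc_t covers the common case. A sketch assuming a driver handle from the earlier enumeration (the wrapper function is illustrative):

#include <level_zero/ze_api.h>
#include <string.h>

static ze_context_handle_t create_default_context(ze_driver_handle_t driver) {
    ze_context_desc_t desc;
    memset(&desc, 0, sizeof(desc));          /* flags = 0 -> driver defaults */
    desc.stype = ZE_STRUCTURE_TYPE_CONTEXT_DESC;

    ze_context_handle_t context = NULL;
    if (zeContextCreate(driver, &desc, &context) != ZE_RESULT_SUCCESS)
        return NULL;
    return context;                          /* release later with zeContextDestroy */
}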
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phContext` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < desc->flags` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextCreate( - ze_driver_handle_t hDriver, ///< [in] handle of the driver object - const ze_context_desc_t* desc, ///< [in] pointer to context descriptor - ze_context_handle_t* phContext ///< [out] pointer to handle of context object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a context for the driver. -/// -/// @details -/// - The application must only use the context for the driver which was -/// provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDriver` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phContext` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < desc->flags` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phDevices) && (0 < numDevices)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextCreateEx( - ze_driver_handle_t hDriver, ///< [in] handle of the driver object - const ze_context_desc_t* desc, ///< [in] pointer to context descriptor - uint32_t numDevices, ///< [in][optional] number of device handles; must be 0 if `nullptr == - ///< phDevices` - ze_device_handle_t* phDevices, ///< [in][optional][range(0, numDevices)] array of device handles which - ///< context has visibility. - ///< if nullptr, then all devices supported by the driver instance are - ///< visible to the context. - ///< otherwise, context only has visibility to devices in this array. - ze_context_handle_t* phContext ///< [out] pointer to handle of context object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys a context. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the context before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this context. -/// - The application must **not** call this function from simultaneous -/// threads with the same context handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextDestroy( - ze_context_handle_t hContext ///< [in][release] handle of context object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Returns current status of the context. 
-/// -/// @details -/// - The application may call this function from simultaneous threads with -/// the same context handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_SUCCESS -/// + Context is available for use. -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// + Context is invalid; due to device lost or reset. -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextGetStatus( - ze_context_handle_t hContext ///< [in] handle of context object - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Command Queue -#if !defined(__GNUC__) -#pragma region cmdqueue -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported command queue flags -typedef uint32_t ze_command_queue_flags_t; -typedef enum _ze_command_queue_flag_t -{ - ZE_COMMAND_QUEUE_FLAG_EXPLICIT_ONLY = ZE_BIT(0),///< command queue should be optimized for submission to a single device engine. - ///< driver **must** disable any implicit optimizations for distributing - ///< work across multiple engines. - ///< this flag should be used when applications want full control over - ///< multi-engine submission and scheduling. - ZE_COMMAND_QUEUE_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_command_queue_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported command queue modes -typedef enum _ze_command_queue_mode_t -{ - ZE_COMMAND_QUEUE_MODE_DEFAULT = 0, ///< implicit default behavior; uses driver-based heuristics - ZE_COMMAND_QUEUE_MODE_SYNCHRONOUS = 1, ///< Device execution always completes immediately on execute; - ///< Host thread is blocked using wait on implicit synchronization object - ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS = 2, ///< Device execution is scheduled and will complete in future; - ///< explicit synchronization object must be used to determine completeness - ZE_COMMAND_QUEUE_MODE_FORCE_UINT32 = 0x7fffffff - -} ze_command_queue_mode_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported command queue priorities -typedef enum _ze_command_queue_priority_t -{ - ZE_COMMAND_QUEUE_PRIORITY_NORMAL = 0, ///< [default] normal priority - ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_LOW = 1, ///< lower priority than normal - ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_HIGH = 2, ///< higher priority than normal - ZE_COMMAND_QUEUE_PRIORITY_FORCE_UINT32 = 0x7fffffff - -} ze_command_queue_priority_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Command Queue descriptor -typedef struct _ze_command_queue_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - uint32_t ordinal; ///< [in] command queue group ordinal - uint32_t index; ///< [in] command queue index within the group; - ///< must be zero if ::ZE_COMMAND_QUEUE_FLAG_EXPLICIT_ONLY is not set - ze_command_queue_flags_t flags; ///< [in] usage flags. - ///< must be 0 (default) or a valid combination of ::ze_command_queue_flag_t; - ///< default behavior may use implicit driver-based heuristics to balance - ///< latency and throughput. 
- ze_command_queue_mode_t mode; ///< [in] operation mode - ze_command_queue_priority_t priority; ///< [in] priority - -} ze_command_queue_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a command queue on the context. -/// -/// @details -/// - A command queue represents a logical input stream to the device, tied -/// to a physical input stream. -/// - The application must only use the command queue for the device, or its -/// sub-devices, which was provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @remarks -/// _Analogues_ -/// - **clCreateCommandQueue** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phCommandQueue` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < desc->flags` -/// + `::ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS < desc->mode` -/// + `::ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_HIGH < desc->priority` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandQueueCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device object - const ze_command_queue_desc_t* desc, ///< [in] pointer to command queue descriptor - ze_command_queue_handle_t* phCommandQueue ///< [out] pointer to handle of command queue object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys a command queue. -/// -/// @details -/// - The application must destroy all fence handles created from the -/// command queue before destroying the command queue itself -/// - The application must ensure the device is not currently referencing -/// the command queue before it is deleted -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this command queue -/// - The application must **not** call this function from simultaneous -/// threads with the same command queue handle. -/// - The implementation of this function must be thread-safe. -/// -/// @remarks -/// _Analogues_ -/// - **clReleaseCommandQueue** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandQueue` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandQueueDestroy( - ze_command_queue_handle_t hCommandQueue ///< [in][release] handle of command queue object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Executes a command list in a command queue. -/// -/// @details -/// - The command lists are submitted to the device in the order they are -/// received, whether from multiple calls (on the same or different -/// threads) or a single call with multiple command lists. -/// - The application must ensure the command lists are accessible by the -/// device on which the command queue was created. 
-/// - The application must ensure the command lists are not currently -/// referencing the command list since the implementation is allowed to -/// modify the contents of the command list for submission. -/// - The application must only execute command lists created with an -/// identical command queue group ordinal to the command queue. -/// - The application must use a fence created using the same command queue. -/// - The application must ensure the command queue, command list and fence -/// were created on the same context. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - vkQueueSubmit -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandQueue` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phCommandLists` -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `0 == numCommandLists` -/// - ::ZE_RESULT_ERROR_INVALID_COMMAND_LIST_TYPE -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandQueueExecuteCommandLists( - ze_command_queue_handle_t hCommandQueue, ///< [in] handle of the command queue - uint32_t numCommandLists, ///< [in] number of command lists to execute - ze_command_list_handle_t* phCommandLists, ///< [in][range(0, numCommandLists)] list of handles of the command lists - ///< to execute - ze_fence_handle_t hFence ///< [in][optional] handle of the fence to signal on completion - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Synchronizes a command queue by waiting on the host. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandQueue` -/// - ::ZE_RESULT_NOT_READY -/// + timeout expired -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandQueueSynchronize( - ze_command_queue_handle_t hCommandQueue, ///< [in] handle of the command queue - uint64_t timeout ///< [in] if non-zero, then indicates the maximum time (in nanoseconds) to - ///< yield before returning ::ZE_RESULT_SUCCESS or ::ZE_RESULT_NOT_READY; - ///< if zero, then immediately returns the status of the command queue; - ///< if UINT64_MAX, then function will not return until complete or device - ///< is lost. - ///< Due to external dependencies, timeout may be rounded to the closest - ///< value allowed by the accuracy of those dependencies. - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Command List -#if !defined(__GNUC__) -#pragma region cmdlist -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported command list creation flags -typedef uint32_t ze_command_list_flags_t; -typedef enum _ze_command_list_flag_t -{ - ZE_COMMAND_LIST_FLAG_RELAXED_ORDERING = ZE_BIT(0), ///< driver may reorder commands (e.g., kernels, copies) between barriers - ///< and synchronization primitives. - ///< using this flag may increase Host overhead of ::zeCommandListClose. - ///< therefore, this flag should **not** be set for low-latency usage-models. 
- ZE_COMMAND_LIST_FLAG_MAXIMIZE_THROUGHPUT = ZE_BIT(1), ///< driver may perform additional optimizations that increase execution - ///< throughput. - ///< using this flag may increase Host overhead of ::zeCommandListClose and ::zeCommandQueueExecuteCommandLists. - ///< therefore, this flag should **not** be set for low-latency usage-models. - ZE_COMMAND_LIST_FLAG_EXPLICIT_ONLY = ZE_BIT(2), ///< command list should be optimized for submission to a single command - ///< queue and device engine. - ///< driver **must** disable any implicit optimizations for distributing - ///< work across multiple engines. - ///< this flag should be used when applications want full control over - ///< multi-engine submission and scheduling. - ZE_COMMAND_LIST_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_command_list_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Command List descriptor -typedef struct _ze_command_list_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - uint32_t commandQueueGroupOrdinal; ///< [in] command queue group ordinal to which this command list will be - ///< submitted - ze_command_list_flags_t flags; ///< [in] usage flags. - ///< must be 0 (default) or a valid combination of ::ze_command_list_flag_t; - ///< default behavior may use implicit driver-based heuristics to balance - ///< latency and throughput. - -} ze_command_list_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a command list on the context. -/// -/// @details -/// - A command list represents a sequence of commands for execution on a -/// command queue. -/// - The command list is created in the 'open' state. -/// - The application must only use the command list for the device, or its -/// sub-devices, which was provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x7 < desc->flags` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device object - const ze_command_list_desc_t* desc, ///< [in] pointer to command list descriptor - ze_command_list_handle_t* phCommandList ///< [out] pointer to handle of command list object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates an immediate command list on the context. -/// -/// @details -/// - An immediate command list is used for low-latency submission of -/// commands. -/// - An immediate command list creates an implicit command queue. -/// - The command list is created in the 'open' state and never needs to be -/// closed. -/// - The application must only use the command list for the device, or its -/// sub-devices, which was provided during creation. 
-/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == altdesc` -/// + `nullptr == phCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < altdesc->flags` -/// + `::ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS < altdesc->mode` -/// + `::ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_HIGH < altdesc->priority` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListCreateImmediate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device object - const ze_command_queue_desc_t* altdesc, ///< [in] pointer to command queue descriptor - ze_command_list_handle_t* phCommandList ///< [out] pointer to handle of command list object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys a command list. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the command list before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this command list. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListDestroy( - ze_command_list_handle_t hCommandList ///< [in][release] handle of command list object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Closes a command list; ready to be executed by a command queue. -/// -/// @details -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListClose( - ze_command_list_handle_t hCommandList ///< [in] handle of command list object to close - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Reset a command list to initial (empty) state; ready for appending -/// commands. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the command list before it is reset -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListReset( - ze_command_list_handle_t hCommandList ///< [in] handle of command list object to reset - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends a memory write of the device's global timestamp value into a -/// command list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The timestamp frequency can be queried from -/// ::ze_device_properties_t.timerResolution. -/// - The number of valid bits in the timestamp value can be queried from -/// ::ze_device_properties_t.timestampValidBits. -/// - The application must ensure the memory pointed to by dstptr is -/// accessible by the device on which the command list was created. -/// - The application must ensure the command list and events were created, -/// and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == dstptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendWriteGlobalTimestamp( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - uint64_t* dstptr, ///< [in,out] pointer to memory where timestamp value will be written; must - ///< be 8byte-aligned. - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before executing query; - ///< must be 0 if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before executing query - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Barrier -#if !defined(__GNUC__) -#pragma region barrier -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends an execution and global memory barrier into a command list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - If numWaitEvents is zero, then all previous commands are completed -/// prior to the execution of the barrier. -/// - If numWaitEvents is non-zero, then then all phWaitEvents must be -/// signaled prior to the execution of the barrier. -/// - This command blocks all following commands from beginning until the -/// execution of the barrier completes. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
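// Illustrative sketch of the command-queue / command-list lifecycle described
// above (create -> append -> close -> execute -> synchronize -> reset/destroy).
// Assumes hContext and hDevice were obtained from the context/device APIs
// earlier in this header, and that the ZE_STRUCTURE_TYPE_* constants and the
// remaining ze_command_queue_desc_t fields (ordinal, index, flags) are as
// defined elsewhere in the header. Error handling is elided.
ze_command_queue_desc_t queueDesc = {
    .stype = ZE_STRUCTURE_TYPE_COMMAND_QUEUE_DESC,
    .pNext = NULL,
    .ordinal = 0,                                   // command queue group ordinal
    .index = 0,
    .flags = 0,
    .mode = ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS,     // host submits and returns immediately
    .priority = ZE_COMMAND_QUEUE_PRIORITY_NORMAL,
};
ze_command_queue_handle_t hQueue = NULL;
zeCommandQueueCreate(hContext, hDevice, &queueDesc, &hQueue);

ze_command_list_desc_t listDesc = {
    .stype = ZE_STRUCTURE_TYPE_COMMAND_LIST_DESC,
    .pNext = NULL,
    .commandQueueGroupOrdinal = 0,                  // must match the queue's group ordinal
    .flags = 0,
};
ze_command_list_handle_t hList = NULL;
zeCommandListCreate(hContext, hDevice, &listDesc, &hList);

// ... append commands here (copies, fills, barriers, kernel launches) ...

zeCommandListClose(hList);                          // list is now executable
zeCommandQueueExecuteCommandLists(hQueue, 1, &hList, NULL);
zeCommandQueueSynchronize(hQueue, UINT64_MAX);      // block until the device is done

zeCommandListReset(hList);                          // reuse the list for new commands
zeCommandListDestroy(hList);
zeCommandQueueDestroy(hQueue);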
-/// -/// @remarks -/// _Analogues_ -/// - **vkCmdPipelineBarrier** -/// - clEnqueueBarrierWithWaitList -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendBarrier( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before executing barrier; - ///< must be 0 if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before executing barrier - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends a global memory ranges barrier into a command list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - If numWaitEvents is zero, then all previous commands are completed -/// prior to the execution of the barrier. -/// - If numWaitEvents is non-zero, then then all phWaitEvents must be -/// signaled prior to the execution of the barrier. -/// - This command blocks all following commands from beginning until the -/// execution of the barrier completes. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pRangeSizes` -/// + `nullptr == pRanges` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemoryRangesBarrier( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - uint32_t numRanges, ///< [in] number of memory ranges - const size_t* pRangeSizes, ///< [in][range(0, numRanges)] array of sizes of memory range - const void** pRanges, ///< [in][range(0, numRanges)] array of memory ranges - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before executing barrier; - ///< must be 0 if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before executing barrier - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Ensures in-bound writes to the device are globally observable. -/// -/// @details -/// - This is a special-case system level barrier that can be used to ensure -/// global observability of writes; -/// typically needed after a producer (e.g., NIC) performs direct writes -/// to the device's memory (e.g., Direct RDMA writes). 
-/// This is typically required when the memory corresponding to the writes -/// is subsequently accessed from a remote device. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextSystemBarrier( - ze_context_handle_t hContext, ///< [in] handle of context object - ze_device_handle_t hDevice ///< [in] handle of the device - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Copies -#if !defined(__GNUC__) -#pragma region copy -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies host, device, or shared memory. -/// -/// @details -/// - The application must ensure the memory pointed to by dstptr and srcptr -/// is accessible by the device on which the command list was created. -/// - The implementation must not access the memory pointed to by dstptr and -/// srcptr as they are free to be modified by either the Host or device up -/// until execution. -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The application must ensure the command list and events were created, -/// and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **clEnqueueCopyBuffer** -/// - **clEnqueueReadBuffer** -/// - **clEnqueueWriteBuffer** -/// - **clEnqueueSVMMemcpy** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == dstptr` -/// + `nullptr == srcptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemoryCopy( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - void* dstptr, ///< [in] pointer to destination memory to copy to - const void* srcptr, ///< [in] pointer to source memory to copy from - size_t size, ///< [in] size in bytes to copy - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Initializes host, device, or shared memory. -/// -/// @details -/// - The application must ensure the memory pointed to by dstptr is -/// accessible by the device on which the command list was created. -/// - The implementation must not access the memory pointed to by dstptr as -/// it is free to be modified by either the Host or device up until -/// execution. 
-/// - The value to initialize memory to is described by the pattern and the -/// pattern size. -/// - The pattern size must be a power-of-two and less than -/// ::ze_command_queue_group_properties_t.maxMemoryFillPatternSize. -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The application must enusre the command list and events were created, -/// and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **clEnqueueFillBuffer** -/// - **clEnqueueSVMMemFill** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// + `nullptr == pattern` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemoryFill( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - void* ptr, ///< [in] pointer to memory to initialize - const void* pattern, ///< [in] pointer to value to initialize memory to - size_t pattern_size, ///< [in] size in bytes of the value to initialize memory to - size_t size, ///< [in] size in bytes to initialize - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copy region descriptor -typedef struct _ze_copy_region_t -{ - uint32_t originX; ///< [in] The origin x offset for region in bytes - uint32_t originY; ///< [in] The origin y offset for region in rows - uint32_t originZ; ///< [in] The origin z offset for region in slices - uint32_t width; ///< [in] The region width relative to origin in bytes - uint32_t height; ///< [in] The region height relative to origin in rows - uint32_t depth; ///< [in] The region depth relative to origin in slices. Set this to 0 for - ///< 2D copy. - -} ze_copy_region_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies a region from a 2D or 3D array of host, device, or shared -/// memory. -/// -/// @details -/// - The application must ensure the memory pointed to by dstptr and srcptr -/// is accessible by the device on which the command list was created. -/// - The implementation must not access the memory pointed to by dstptr and -/// srcptr as they are free to be modified by either the Host or device up -/// until execution. -/// - The region width, height, and depth for both src and dst must be same. -/// The origins can be different. -/// - The src and dst regions cannot be overlapping. -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. 
-/// - The application must ensure the command list and events were created, -/// and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == dstptr` -/// + `nullptr == dstRegion` -/// + `nullptr == srcptr` -/// + `nullptr == srcRegion` -/// - ::ZE_RESULT_ERROR_OVERLAPPING_REGIONS -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemoryCopyRegion( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - void* dstptr, ///< [in] pointer to destination memory to copy to - const ze_copy_region_t* dstRegion, ///< [in] pointer to destination region to copy to - uint32_t dstPitch, ///< [in] destination pitch in bytes - uint32_t dstSlicePitch, ///< [in] destination slice pitch in bytes. This is required for 3D region - ///< copies where ::ze_copy_region_t.depth is not 0, otherwise it's - ///< ignored. - const void* srcptr, ///< [in] pointer to source memory to copy from - const ze_copy_region_t* srcRegion, ///< [in] pointer to source region to copy from - uint32_t srcPitch, ///< [in] source pitch in bytes - uint32_t srcSlicePitch, ///< [in] source slice pitch in bytes. This is required for 3D region - ///< copies where ::ze_copy_region_t.depth is not 0, otherwise it's - ///< ignored. - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies host, device, or shared memory from another context. -/// -/// @details -/// - The current active and source context must be from the same driver. -/// - The application must ensure the memory pointed to by dstptr and srcptr -/// is accessible by the device on which the command list was created. -/// - The implementation must not access the memory pointed to by dstptr and -/// srcptr as they are free to be modified by either the Host or device up -/// until execution. -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The application must ensure the command list and events were created, -/// and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
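// Illustrative sketch of the fill / barrier / copy commands described above,
// appended to an open command list. hList is the open command list from the
// lifecycle sketch earlier; devBuf and hostBuf are assumed to be device and
// host allocations of `size` bytes obtained from the memory-allocation APIs
// defined elsewhere in this header. Error handling is elided.
uint32_t pattern = 0xDEADBEEF;                      // pattern size must be a power of two
zeCommandListAppendMemoryFill(hList, devBuf, &pattern, sizeof(pattern), size,
                              NULL, 0, NULL);       // no signal event, no wait events
// Barrier: the copy below must observe the completed fill.
zeCommandListAppendBarrier(hList, NULL, 0, NULL);
zeCommandListAppendMemoryCopy(hList, hostBuf, devBuf, size, NULL, 0, NULL);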
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hContextSrc` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == dstptr` -/// + `nullptr == srcptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemoryCopyFromContext( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - void* dstptr, ///< [in] pointer to destination memory to copy to - ze_context_handle_t hContextSrc, ///< [in] handle of source context object - const void* srcptr, ///< [in] pointer to source memory to copy from - size_t size, ///< [in] size in bytes to copy - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies an image. -/// -/// @details -/// - The application must ensure the image and events are accessible by the -/// device on which the command list was created. -/// - The application must ensure the image format descriptors for both -/// source and destination images are the same. -/// - The application must ensure the command list, images and events were -/// created on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @remarks -/// _Analogues_ -/// - **clEnqueueCopyImage** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hDstImage` -/// + `nullptr == hSrcImage` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendImageCopy( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - ze_image_handle_t hDstImage, ///< [in] handle of destination image to copy to - ze_image_handle_t hSrcImage, ///< [in] handle of source image to copy from - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Region descriptor -typedef struct _ze_image_region_t -{ - uint32_t originX; ///< [in] The origin x offset for region in pixels - uint32_t originY; ///< [in] The origin y offset for region in pixels - uint32_t originZ; ///< [in] The origin z offset for region in pixels - uint32_t width; ///< [in] The region width relative to origin in pixels - uint32_t height; ///< [in] The region height relative to origin in pixels - uint32_t depth; ///< [in] The region depth relative to origin. For 1D or 2D images, set - ///< this to 1. - -} ze_image_region_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies a region of an image to another image. -/// -/// @details -/// - The application must ensure the image and events are accessible by the -/// device on which the command list was created. -/// - The region width and height for both src and dst must be same. The -/// origins can be different. -/// - The src and dst regions cannot be overlapping. -/// - The application must ensure the image format descriptors for both -/// source and destination images are the same. -/// - The application must ensure the command list, images and events were -/// created, and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
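// Illustrative 2D region copy using ze_copy_region_t as described above: copy
// a widthBytes x rows sub-rectangle between two pitched linear buffers. hList,
// src, dst and the pitch/extent variables are assumed to exist already; per
// the descriptor above, depth is set to 0 for a 2D copy, in which case the
// slice pitches are ignored.
ze_copy_region_t srcRegion = { .originX = 0, .originY = 0, .originZ = 0,
                               .width = widthBytes, .height = rows, .depth = 0 };
ze_copy_region_t dstRegion = srcRegion;             // same extent; origins may differ
zeCommandListAppendMemoryCopyRegion(hList,
                                    dst, &dstRegion, dstPitchBytes, 0,
                                    src, &srcRegion, srcPitchBytes, 0,
                                    NULL, 0, NULL);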
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hDstImage` -/// + `nullptr == hSrcImage` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_OVERLAPPING_REGIONS -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendImageCopyRegion( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - ze_image_handle_t hDstImage, ///< [in] handle of destination image to copy to - ze_image_handle_t hSrcImage, ///< [in] handle of source image to copy from - const ze_image_region_t* pDstRegion, ///< [in][optional] destination region descriptor - const ze_image_region_t* pSrcRegion, ///< [in][optional] source region descriptor - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies from an image to device or shared memory. -/// -/// @details -/// - The application must ensure the memory pointed to by dstptr is -/// accessible by the device on which the command list was created. -/// - The implementation must not access the memory pointed to by dstptr as -/// it is free to be modified by either the Host or device up until -/// execution. -/// - The application must ensure the image and events are accessible by the -/// device on which the command list was created. -/// - The application must ensure the image format descriptor for the source -/// image is not a media format. -/// - The application must ensure the command list, image and events were -/// created, and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @remarks -/// _Analogues_ -/// - clEnqueueReadImage -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hSrcImage` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == dstptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendImageCopyToMemory( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - void* dstptr, ///< [in] pointer to destination memory to copy to - ze_image_handle_t hSrcImage, ///< [in] handle of source image to copy from - const ze_image_region_t* pSrcRegion, ///< [in][optional] source region descriptor - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Copies to an image from device or shared memory. -/// -/// @details -/// - The application must ensure the memory pointed to by srcptr is -/// accessible by the device on which the command list was created. -/// - The implementation must not access the memory pointed to by srcptr as -/// it is free to be modified by either the Host or device up until -/// execution. -/// - The application must ensure the image and events are accessible by the -/// device on which the command list was created. -/// - The application must ensure the image format descriptor for the -/// destination image is not a media format. -/// - The application must ensure the command list, image and events were -/// created, and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
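// Illustrative read-back of a 2D image into host memory using
// ze_image_region_t and zeCommandListAppendImageCopyToMemory as described
// above. hImage is assumed to be an image created with the image APIs defined
// elsewhere in this header, and hostPixels a sufficiently large host buffer.
ze_image_region_t region = { .originX = 0, .originY = 0, .originZ = 0,
                             .width = imgWidth, .height = imgHeight,
                             .depth = 1 };           // 1 for 1D or 2D images
zeCommandListAppendImageCopyToMemory(hList, hostPixels, hImage, &region,
                                     NULL, 0, NULL);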
-/// -/// @remarks -/// _Analogues_ -/// - clEnqueueWriteImage -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hDstImage` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == srcptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendImageCopyFromMemory( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - ze_image_handle_t hDstImage, ///< [in] handle of destination image to copy to - const void* srcptr, ///< [in] pointer to source memory to copy from - const ze_image_region_t* pDstRegion, ///< [in][optional] destination region descriptor - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Asynchronously prefetches shared memory to the device associated with -/// the specified command list -/// -/// @details -/// - This is a hint to improve performance only and is not required for -/// correctness. -/// - Only prefetching to the device associated with the specified command -/// list is supported. -/// Prefetching to the host or to a peer device is not supported. -/// - Prefetching may not be supported for all allocation types for all devices. -/// If memory prefetching is not supported for the specified memory range -/// the prefetch hint may be ignored. -/// - Prefetching may only be supported at a device-specific granularity, -/// such as at a page boundary. -/// In this case, the memory range may be expanded such that the start and -/// end of the range satisfy granularity requirements. -/// - The application must ensure the memory pointed to by ptr is accessible -/// by the device on which the command list was created. -/// - The application must ensure the command list was created, and the -/// memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @remarks -/// _Analogues_ -/// - clEnqueueSVMMigrateMem -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemoryPrefetch( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - const void* ptr, ///< [in] pointer to start of the memory range to prefetch - size_t size ///< [in] size in bytes of the memory range to prefetch - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported memory advice hints -typedef enum _ze_memory_advice_t -{ - ZE_MEMORY_ADVICE_SET_READ_MOSTLY = 0, ///< hint that memory will be read from frequently and written to rarely - ZE_MEMORY_ADVICE_CLEAR_READ_MOSTLY = 1, ///< removes the affect of ::ZE_MEMORY_ADVICE_SET_READ_MOSTLY - ZE_MEMORY_ADVICE_SET_PREFERRED_LOCATION = 2, ///< hint that the preferred memory location is the specified device - ZE_MEMORY_ADVICE_CLEAR_PREFERRED_LOCATION = 3, ///< removes the affect of ::ZE_MEMORY_ADVICE_SET_PREFERRED_LOCATION - ZE_MEMORY_ADVICE_SET_NON_ATOMIC_MOSTLY = 4, ///< hints that memory will mostly be accessed non-atomically - ZE_MEMORY_ADVICE_CLEAR_NON_ATOMIC_MOSTLY = 5, ///< removes the affect of ::ZE_MEMORY_ADVICE_SET_NON_ATOMIC_MOSTLY - ZE_MEMORY_ADVICE_BIAS_CACHED = 6, ///< hints that memory should be cached - ZE_MEMORY_ADVICE_BIAS_UNCACHED = 7, ///< hints that memory should be not be cached - ZE_MEMORY_ADVICE_FORCE_UINT32 = 0x7fffffff - -} ze_memory_advice_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Provides advice about the use of a shared memory range -/// -/// @details -/// - Memory advice is a performance hint only and is not required for -/// functional correctness. -/// - Memory advice can be used to override driver heuristics to explicitly -/// control shared memory behavior. -/// - Not all memory advice hints may be supported for all allocation types -/// for all devices. -/// If a memory advice hint is not supported by the device it will be ignored. -/// - Memory advice may only be supported at a device-specific granularity, -/// such as at a page boundary. -/// In this case, the memory range may be expanded such that the start and -/// end of the range satisfy granularity requirements. -/// - The application must ensure the memory pointed to by ptr is accessible -/// by the device on which the command list was created. -/// - The application must ensure the command list was created, and memory -/// was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle, and the memory was -/// allocated. -/// - The implementation of this function should be lock-free. 
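// Illustrative use of the shared-memory hints described above: prefetch a
// shared allocation to the device associated with the command list, then mark
// it read-mostly. Both are performance hints only and may be ignored.
// sharedBuf is assumed to be a shared allocation of `size` bytes and hDevice
// the device the advice applies to.
zeCommandListAppendMemoryPrefetch(hList, sharedBuf, size);
zeCommandListAppendMemAdvise(hList, hDevice, sharedBuf, size,
                             ZE_MEMORY_ADVICE_SET_READ_MOSTLY);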
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `::ZE_MEMORY_ADVICE_BIAS_UNCACHED < advice` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendMemAdvise( - ze_command_list_handle_t hCommandList, ///< [in] handle of command list - ze_device_handle_t hDevice, ///< [in] device associated with the memory advice - const void* ptr, ///< [in] Pointer to the start of the memory range - size_t size, ///< [in] Size in bytes of the memory range - ze_memory_advice_t advice ///< [in] Memory advice for the memory range - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Event -#if !defined(__GNUC__) -#pragma region event -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported event pool creation flags -typedef uint32_t ze_event_pool_flags_t; -typedef enum _ze_event_pool_flag_t -{ - ZE_EVENT_POOL_FLAG_HOST_VISIBLE = ZE_BIT(0), ///< signals and waits are also visible to host - ZE_EVENT_POOL_FLAG_IPC = ZE_BIT(1), ///< signals and waits may be shared across processes - ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP = ZE_BIT(2),///< Indicates all events in pool will contain kernel timestamps; cannot be - ///< combined with ::ZE_EVENT_POOL_FLAG_IPC - ZE_EVENT_POOL_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_event_pool_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Event pool descriptor -typedef struct _ze_event_pool_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_event_pool_flags_t flags; ///< [in] creation flags. - ///< must be 0 (default) or a valid combination of ::ze_event_pool_flag_t; - ///< default behavior is signals and waits are visible to the entire device - ///< and peer devices. - uint32_t count; ///< [in] number of events within the pool; must be greater than 0 - -} ze_event_pool_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a pool of events on the context. -/// -/// @details -/// - The application must only use events within the pool for the -/// device(s), or their sub-devices, which were provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phEventPool` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x7 < desc->flags` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `0 == desc->count` -/// + `(nullptr == phDevices) && (0 < numDevices)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventPoolCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - const ze_event_pool_desc_t* desc, ///< [in] pointer to event pool descriptor - uint32_t numDevices, ///< [in][optional] number of device handles; must be 0 if `nullptr == - ///< phDevices` - ze_device_handle_t* phDevices, ///< [in][optional][range(0, numDevices)] array of device handles which - ///< have visibility to the event pool. - ///< if nullptr, then event pool is visible to all devices supported by the - ///< driver instance. - ze_event_pool_handle_t* phEventPool ///< [out] pointer handle of event pool object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Deletes an event pool object. -/// -/// @details -/// - The application must destroy all event handles created from the pool -/// before destroying the pool itself. -/// - The application must ensure the device is not currently referencing -/// the any event within the pool before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this event pool. -/// - The application must **not** call this function from simultaneous -/// threads with the same event pool handle. -/// - The implementation of this function must be thread-safe. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEventPool` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventPoolDestroy( - ze_event_pool_handle_t hEventPool ///< [in][release] handle of event pool object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported event scope flags -typedef uint32_t ze_event_scope_flags_t; -typedef enum _ze_event_scope_flag_t -{ - ZE_EVENT_SCOPE_FLAG_SUBDEVICE = ZE_BIT(0), ///< cache hierarchies are flushed or invalidated sufficient for local - ///< sub-device access - ZE_EVENT_SCOPE_FLAG_DEVICE = ZE_BIT(1), ///< cache hierarchies are flushed or invalidated sufficient for global - ///< device access and peer device access - ZE_EVENT_SCOPE_FLAG_HOST = ZE_BIT(2), ///< cache hierarchies are flushed or invalidated sufficient for device and - ///< host access - ZE_EVENT_SCOPE_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_event_scope_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Event descriptor -typedef struct _ze_event_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - uint32_t index; ///< [in] index of the event within the pool; must be less-than the count - ///< specified during pool creation - ze_event_scope_flags_t signal; ///< [in] defines the scope of relevant cache hierarchies to flush on a - ///< signal action before the event is triggered. - ///< must be 0 (default) or a valid combination of ::ze_event_scope_flag_t; - ///< default behavior is synchronization within the command list only, no - ///< additional cache hierarchies are flushed. - ze_event_scope_flags_t wait; ///< [in] defines the scope of relevant cache hierarchies to invalidate on - ///< a wait action after the event is complete. - ///< must be 0 (default) or a valid combination of ::ze_event_scope_flag_t; - ///< default behavior is synchronization within the command list only, no - ///< additional cache hierarchies are invalidated. - -} ze_event_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates an event from the pool. -/// -/// @details -/// - An event is used to communicate fine-grain host-to-device, -/// device-to-host or device-to-device dependencies have completed. -/// - The application must ensure the location in the pool is not being used -/// by another event. -/// - The application must **not** call this function from simultaneous -/// threads with the same event pool handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @remarks -/// _Analogues_ -/// - **clCreateUserEvent** -/// - vkCreateEvent -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEventPool` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phEvent` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x7 < desc->signal` -/// + `0x7 < desc->wait` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventCreate( - ze_event_pool_handle_t hEventPool, ///< [in] handle of the event pool - const ze_event_desc_t* desc, ///< [in] pointer to event descriptor - ze_event_handle_t* phEvent ///< [out] pointer to handle of event object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Deletes an event object. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the event before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this event. -/// - The application must **not** call this function from simultaneous -/// threads with the same event handle. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **clReleaseEvent** -/// - vkDestroyEvent -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventDestroy( - ze_event_handle_t hEvent ///< [in][release] handle of event object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Gets an IPC event pool handle for the specified event handle that can -/// be shared with another process. -/// -/// @details -/// - Event pool must have been created with ::ZE_EVENT_POOL_FLAG_IPC. -/// - The application may call this function from simultaneous threads. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEventPool` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phIpc` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventPoolGetIpcHandle( - ze_event_pool_handle_t hEventPool, ///< [in] handle of event pool object - ze_ipc_event_pool_handle_t* phIpc ///< [out] Returned IPC event handle - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Opens an IPC event pool handle to retrieve an event pool handle from -/// another process. -/// -/// @details -/// - Multiple calls to this function with the same IPC handle will return -/// unique event pool handles. -/// - The event handle in this process should not be freed with -/// ::zeEventPoolDestroy, but rather with ::zeEventPoolCloseIpcHandle. -/// - The application may call this function from simultaneous threads. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phEventPool` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventPoolOpenIpcHandle( - ze_context_handle_t hContext, ///< [in] handle of the context object to associate with the IPC event pool - ///< handle - ze_ipc_event_pool_handle_t hIpc, ///< [in] IPC event pool handle - ze_event_pool_handle_t* phEventPool ///< [out] pointer handle of event pool object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Closes an IPC event handle in the current process. -/// -/// @details -/// - Closes an IPC event handle by destroying events that were opened in -/// this process using ::zeEventPoolOpenIpcHandle. -/// - The application must **not** call this function from simultaneous -/// threads with the same event pool handle. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEventPool` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventPoolCloseIpcHandle( - ze_event_pool_handle_t hEventPool ///< [in][release] handle of event pool object - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends a signal of the event from the device into a command list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The duration of an event created from an event pool that was created -/// using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag is undefined. -/// However, for consistency and orthogonality the event will report -/// correctly as signaled when used by other event API functionality. -/// - The application must ensure the command list and events were created -/// on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **clSetUserEventStatus** -/// - vkCmdSetEvent -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendSignalEvent( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - ze_event_handle_t hEvent ///< [in] handle of the event - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends wait on event(s) on the device into a command list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The application must ensure the command list and events were created -/// on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phEvents` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendWaitOnEvents( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - uint32_t numEvents, ///< [in] number of events to wait on before continuing - ze_event_handle_t* phEvents ///< [in][range(0, numEvents)] handles of the events to wait on before - ///< continuing - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Signals a event from host. -/// -/// @details -/// - The duration of an event created from an event pool that was created -/// using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag is undefined. -/// However, for consistency and orthogonality the event will report -/// correctly as signaled when used by other event API functionality. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - clSetUserEventStatus -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventHostSignal( - ze_event_handle_t hEvent ///< [in] handle of the event - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief The current host thread waits on an event to be signaled. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - clWaitForEvents -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_NOT_READY -/// + timeout expired -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventHostSynchronize( - ze_event_handle_t hEvent, ///< [in] handle of the event - uint64_t timeout ///< [in] if non-zero, then indicates the maximum time (in nanoseconds) to - ///< yield before returning ::ZE_RESULT_SUCCESS or ::ZE_RESULT_NOT_READY; - ///< if zero, then operates exactly like ::zeEventQueryStatus; - ///< if UINT64_MAX, then function will not return until complete or device - ///< is lost. - ///< Due to external dependencies, timeout may be rounded to the closest - ///< value allowed by the accuracy of those dependencies. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Queries an event object's status on the host. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
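// Illustrative sketch of the event APIs described above: create a host-visible
// event pool and one event, have a copy signal the event on the device, then
// wait for it on the host. hContext, hDevice, hQueue, hList, dst, src and size
// are carried over from the earlier sketches; the ZE_STRUCTURE_TYPE_* values
// are assumed from elsewhere in this header. Error handling is elided.
ze_event_pool_desc_t poolDesc = {
    .stype = ZE_STRUCTURE_TYPE_EVENT_POOL_DESC,
    .pNext = NULL,
    .flags = ZE_EVENT_POOL_FLAG_HOST_VISIBLE,        // host will wait on these events
    .count = 1,
};
ze_event_pool_handle_t hPool = NULL;
zeEventPoolCreate(hContext, &poolDesc, 1, &hDevice, &hPool);

ze_event_desc_t eventDesc = {
    .stype = ZE_STRUCTURE_TYPE_EVENT_DESC,
    .pNext = NULL,
    .index = 0,                                      // slot within the pool
    .signal = ZE_EVENT_SCOPE_FLAG_HOST,              // flush caches so the host sees the signal
    .wait = ZE_EVENT_SCOPE_FLAG_HOST,
};
ze_event_handle_t hEvent = NULL;
zeEventCreate(hPool, &eventDesc, &hEvent);

zeCommandListAppendMemoryCopy(hList, dst, src, size, hEvent, 0, NULL);
zeCommandListClose(hList);
zeCommandQueueExecuteCommandLists(hQueue, 1, &hList, NULL);
zeEventHostSynchronize(hEvent, UINT64_MAX);          // wait for the copy to signal

zeEventDestroy(hEvent);
zeEventPoolDestroy(hPool);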
-/// -/// @remarks -/// _Analogues_ -/// - **clGetEventInfo** -/// - vkGetEventStatus -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_NOT_READY -/// + not signaled -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventQueryStatus( - ze_event_handle_t hEvent ///< [in] handle of the event - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends a reset of an event back to not signaled state into a command -/// list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The application must ensure the command list and events were created -/// on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - vkResetEvent -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendEventReset( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - ze_event_handle_t hEvent ///< [in] handle of the event - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief The current host thread resets an event back to not signaled state. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - vkResetEvent -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventHostReset( - ze_event_handle_t hEvent ///< [in] handle of the event - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel timestamp clock data -/// -/// @details -/// - The timestamp frequency can be queried from -/// ::ze_device_properties_t.timerResolution. -/// - The number of valid bits in the timestamp value can be queried from -/// ::ze_device_properties_t.kernelTimestampValidBits. -typedef struct _ze_kernel_timestamp_data_t -{ - uint64_t kernelStart; ///< [out] device clock at start of kernel execution - uint64_t kernelEnd; ///< [out] device clock at end of kernel execution - -} ze_kernel_timestamp_data_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel timestamp result -typedef struct _ze_kernel_timestamp_result_t -{ - ze_kernel_timestamp_data_t global; ///< [out] wall-clock data - ze_kernel_timestamp_data_t context; ///< [out] context-active data; only includes clocks while device context - ///< was actively executing. - -} ze_kernel_timestamp_result_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Queries an event's timestamp value on the host. 
-/// -/// @details -/// - The application must ensure the event was created from an event pool -/// that was created using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag. -/// - The destination memory will be unmodified if the event has not been -/// signaled. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hEvent` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == dstptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_NOT_READY -/// + not signaled -ZE_APIEXPORT ze_result_t ZE_APICALL -zeEventQueryKernelTimestamp( - ze_event_handle_t hEvent, ///< [in] handle of the event - ze_kernel_timestamp_result_t* dstptr ///< [in,out] pointer to memory for where timestamp result will be written. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Appends a query of an events' timestamp value(s) into a command list. -/// -/// @details -/// - The application must ensure the events are accessible by the device on -/// which the command list was created. -/// - The application must ensure the events were created from an event pool -/// that was created using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag. -/// - The application must ensure the memory pointed to by both dstptr and -/// pOffsets is accessible by the device on which the command list was -/// created. -/// - The value(s) written to the destination buffer are undefined if any -/// timestamp event has not been signaled. -/// - If pOffsets is nullptr, then multiple results will be appended -/// sequentially into memory in the same order as phEvents. -/// - The application must ensure the command list and events were created, -/// and the memory was allocated, on the same context. -/// - The application must **not** call this function from simultaneous -/// threads with the same command list handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phEvents` -/// + `nullptr == dstptr` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendQueryKernelTimestamps( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - uint32_t numEvents, ///< [in] the number of timestamp events to query - ze_event_handle_t* phEvents, ///< [in][range(0, numEvents)] handles of timestamp events to query - void* dstptr, ///< [in,out] pointer to memory where ::ze_kernel_timestamp_result_t will - ///< be written; must be size-aligned. - const size_t* pOffsets, ///< [in][optional][range(0, numEvents)] offset, in bytes, to write - ///< results; address must be 4byte-aligned and offsets must be - ///< size-aligned. 
- ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before executing query; - ///< must be 0 if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before executing query - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Fence -#if !defined(__GNUC__) -#pragma region fence -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported fence creation flags -typedef uint32_t ze_fence_flags_t; -typedef enum _ze_fence_flag_t -{ - ZE_FENCE_FLAG_SIGNALED = ZE_BIT(0), ///< fence is created in the signaled state, otherwise not signaled. - ZE_FENCE_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_fence_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Fence descriptor -typedef struct _ze_fence_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_fence_flags_t flags; ///< [in] creation flags. - ///< must be 0 (default) or a valid combination of ::ze_fence_flag_t. - -} ze_fence_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a fence for the command queue. -/// -/// @details -/// - A fence is a heavyweight synchronization primitive used to communicate -/// to the host that command list execution has completed. -/// - The application must only use the fence for the command queue which -/// was provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @remarks -/// _Analogues_ -/// - **vkCreateFence** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandQueue` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phFence` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < desc->flags` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeFenceCreate( - ze_command_queue_handle_t hCommandQueue, ///< [in] handle of command queue - const ze_fence_desc_t* desc, ///< [in] pointer to fence descriptor - ze_fence_handle_t* phFence ///< [out] pointer to handle of fence object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Deletes a fence object. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the fence before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this fence. -/// - The application must **not** call this function from simultaneous -/// threads with the same fence handle. -/// - The implementation of this function must be thread-safe. 
-/// -/// @remarks -/// _Analogues_ -/// - **vkDestroyFence** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hFence` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeFenceDestroy( - ze_fence_handle_t hFence ///< [in][release] handle of fence object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief The current host thread waits on a fence to be signaled. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **vkWaitForFences** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hFence` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_NOT_READY -/// + timeout expired -ZE_APIEXPORT ze_result_t ZE_APICALL -zeFenceHostSynchronize( - ze_fence_handle_t hFence, ///< [in] handle of the fence - uint64_t timeout ///< [in] if non-zero, then indicates the maximum time (in nanoseconds) to - ///< yield before returning ::ZE_RESULT_SUCCESS or ::ZE_RESULT_NOT_READY; - ///< if zero, then operates exactly like ::zeFenceQueryStatus; - ///< if UINT64_MAX, then function will not return until complete or device - ///< is lost. - ///< Due to external dependencies, timeout may be rounded to the closest - ///< value allowed by the accuracy of those dependencies. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Queries a fence object's status. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @remarks -/// _Analogues_ -/// - **vkGetFenceStatus** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hFence` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_NOT_READY -/// + not signaled -ZE_APIEXPORT ze_result_t ZE_APICALL -zeFenceQueryStatus( - ze_fence_handle_t hFence ///< [in] handle of the fence - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Reset a fence back to the not signaled state. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
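The fence declarations removed above form a simple lifecycle around a queue submission. A minimal sketch, assuming a command queue and command list created elsewhere, zeCommandQueueExecuteCommandLists declared earlier in ze_api.h (not in this hunk), and a ZE_STRUCTURE_TYPE_FENCE_DESC constant from the structure-type enum earlier in the header:

/* Hedged sketch: fence-based completion tracking for one submission. */
#include <level_zero/ze_api.h>

ze_result_t submit_and_wait(ze_command_queue_handle_t hQueue,
                            ze_command_list_handle_t hCmdList)
{
    ze_fence_desc_t desc = {
        .stype = ZE_STRUCTURE_TYPE_FENCE_DESC, /* assumed: defined earlier in ze_api.h */
        .pNext = NULL,
        .flags = 0, /* created unsignaled */
    };
    ze_fence_handle_t hFence = NULL;
    ze_result_t r = zeFenceCreate(hQueue, &desc, &hFence);
    if (r != ZE_RESULT_SUCCESS) return r;

    /* zeCommandQueueExecuteCommandLists is declared earlier in this header. */
    r = zeCommandQueueExecuteCommandLists(hQueue, 1, &hCmdList, hFence);
    if (r == ZE_RESULT_SUCCESS)
        r = zeFenceHostSynchronize(hFence, UINT64_MAX); /* block until completion */

    zeFenceDestroy(hFence);
    return r;
}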
-/// -/// @remarks -/// _Analogues_ -/// - **vkResetFences** -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hFence` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeFenceReset( - ze_fence_handle_t hFence ///< [in] handle of the fence - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Images -#if !defined(__GNUC__) -#pragma region image -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported image creation flags -typedef uint32_t ze_image_flags_t; -typedef enum _ze_image_flag_t -{ - ZE_IMAGE_FLAG_KERNEL_WRITE = ZE_BIT(0), ///< kernels will write contents - ZE_IMAGE_FLAG_BIAS_UNCACHED = ZE_BIT(1), ///< device should not cache contents - ZE_IMAGE_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_image_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported image types -typedef enum _ze_image_type_t -{ - ZE_IMAGE_TYPE_1D = 0, ///< 1D - ZE_IMAGE_TYPE_1DARRAY = 1, ///< 1D array - ZE_IMAGE_TYPE_2D = 2, ///< 2D - ZE_IMAGE_TYPE_2DARRAY = 3, ///< 2D array - ZE_IMAGE_TYPE_3D = 4, ///< 3D - ZE_IMAGE_TYPE_BUFFER = 5, ///< Buffer - ZE_IMAGE_TYPE_FORCE_UINT32 = 0x7fffffff - -} ze_image_type_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported image format layouts -typedef enum _ze_image_format_layout_t -{ - ZE_IMAGE_FORMAT_LAYOUT_8 = 0, ///< 8-bit single component layout - ZE_IMAGE_FORMAT_LAYOUT_16 = 1, ///< 16-bit single component layout - ZE_IMAGE_FORMAT_LAYOUT_32 = 2, ///< 32-bit single component layout - ZE_IMAGE_FORMAT_LAYOUT_8_8 = 3, ///< 2-component 8-bit layout - ZE_IMAGE_FORMAT_LAYOUT_8_8_8_8 = 4, ///< 4-component 8-bit layout - ZE_IMAGE_FORMAT_LAYOUT_16_16 = 5, ///< 2-component 16-bit layout - ZE_IMAGE_FORMAT_LAYOUT_16_16_16_16 = 6, ///< 4-component 16-bit layout - ZE_IMAGE_FORMAT_LAYOUT_32_32 = 7, ///< 2-component 32-bit layout - ZE_IMAGE_FORMAT_LAYOUT_32_32_32_32 = 8, ///< 4-component 32-bit layout - ZE_IMAGE_FORMAT_LAYOUT_10_10_10_2 = 9, ///< 4-component 10_10_10_2 layout - ZE_IMAGE_FORMAT_LAYOUT_11_11_10 = 10, ///< 3-component 11_11_10 layout - ZE_IMAGE_FORMAT_LAYOUT_5_6_5 = 11, ///< 3-component 5_6_5 layout - ZE_IMAGE_FORMAT_LAYOUT_5_5_5_1 = 12, ///< 4-component 5_5_5_1 layout - ZE_IMAGE_FORMAT_LAYOUT_4_4_4_4 = 13, ///< 4-component 4_4_4_4 layout - ZE_IMAGE_FORMAT_LAYOUT_Y8 = 14, ///< Media Format: Y8. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_NV12 = 15, ///< Media Format: NV12. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_YUYV = 16, ///< Media Format: YUYV. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_VYUY = 17, ///< Media Format: VYUY. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_YVYU = 18, ///< Media Format: YVYU. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_UYVY = 19, ///< Media Format: UYVY. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_AYUV = 20, ///< Media Format: AYUV. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_P010 = 21, ///< Media Format: P010. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_Y410 = 22, ///< Media Format: Y410. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_P012 = 23, ///< Media Format: P012. 
Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_Y16 = 24, ///< Media Format: Y16. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_P016 = 25, ///< Media Format: P016. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_Y216 = 26, ///< Media Format: Y216. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_P216 = 27, ///< Media Format: P216. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_P8 = 28, ///< Media Format: P8. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_YUY2 = 29, ///< Media Format: YUY2. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_A8P8 = 30, ///< Media Format: A8P8. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_IA44 = 31, ///< Media Format: IA44. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_AI44 = 32, ///< Media Format: AI44. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_Y416 = 33, ///< Media Format: Y416. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_Y210 = 34, ///< Media Format: Y210. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_I420 = 35, ///< Media Format: I420. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_YV12 = 36, ///< Media Format: YV12. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_400P = 37, ///< Media Format: 400P. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_422H = 38, ///< Media Format: 422H. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_422V = 39, ///< Media Format: 422V. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_444P = 40, ///< Media Format: 444P. Format type and swizzle is ignored for this. - ZE_IMAGE_FORMAT_LAYOUT_FORCE_UINT32 = 0x7fffffff - -} ze_image_format_layout_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported image format types -typedef enum _ze_image_format_type_t -{ - ZE_IMAGE_FORMAT_TYPE_UINT = 0, ///< Unsigned integer - ZE_IMAGE_FORMAT_TYPE_SINT = 1, ///< Signed integer - ZE_IMAGE_FORMAT_TYPE_UNORM = 2, ///< Unsigned normalized integer - ZE_IMAGE_FORMAT_TYPE_SNORM = 3, ///< Signed normalized integer - ZE_IMAGE_FORMAT_TYPE_FLOAT = 4, ///< Float - ZE_IMAGE_FORMAT_TYPE_FORCE_UINT32 = 0x7fffffff - -} ze_image_format_type_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported image format component swizzle into channel -typedef enum _ze_image_format_swizzle_t -{ - ZE_IMAGE_FORMAT_SWIZZLE_R = 0, ///< Red component - ZE_IMAGE_FORMAT_SWIZZLE_G = 1, ///< Green component - ZE_IMAGE_FORMAT_SWIZZLE_B = 2, ///< Blue component - ZE_IMAGE_FORMAT_SWIZZLE_A = 3, ///< Alpha component - ZE_IMAGE_FORMAT_SWIZZLE_0 = 4, ///< Zero - ZE_IMAGE_FORMAT_SWIZZLE_1 = 5, ///< One - ZE_IMAGE_FORMAT_SWIZZLE_X = 6, ///< Don't care - ZE_IMAGE_FORMAT_SWIZZLE_FORCE_UINT32 = 0x7fffffff - -} ze_image_format_swizzle_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Image format -typedef struct _ze_image_format_t -{ - ze_image_format_layout_t layout; ///< [in] image format component layout - ze_image_format_type_t type; ///< [in] image format type. Media formats can't be used for - ///< ::ZE_IMAGE_TYPE_BUFFER. 
- ze_image_format_swizzle_t x; ///< [in] image component swizzle into channel x - ze_image_format_swizzle_t y; ///< [in] image component swizzle into channel y - ze_image_format_swizzle_t z; ///< [in] image component swizzle into channel z - ze_image_format_swizzle_t w; ///< [in] image component swizzle into channel w - -} ze_image_format_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Image descriptor -typedef struct _ze_image_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_image_flags_t flags; ///< [in] creation flags. - ///< must be 0 (default) or a valid combination of ::ze_image_flag_t; - ///< default is read-only, cached access. - ze_image_type_t type; ///< [in] image type - ze_image_format_t format; ///< [in] image format - uint64_t width; ///< [in] width dimension. - ///< ::ZE_IMAGE_TYPE_BUFFER: size in bytes; see - ///< ::ze_device_image_properties_t.maxImageBufferSize for limits. - ///< ::ZE_IMAGE_TYPE_1D, ::ZE_IMAGE_TYPE_1DARRAY: width in pixels; see - ///< ::ze_device_image_properties_t.maxImageDims1D for limits. - ///< ::ZE_IMAGE_TYPE_2D, ::ZE_IMAGE_TYPE_2DARRAY: width in pixels; see - ///< ::ze_device_image_properties_t.maxImageDims2D for limits. - ///< ::ZE_IMAGE_TYPE_3D: width in pixels; see - ///< ::ze_device_image_properties_t.maxImageDims3D for limits. - uint32_t height; ///< [in] height dimension. - ///< ::ZE_IMAGE_TYPE_2D, ::ZE_IMAGE_TYPE_2DARRAY: height in pixels; see - ///< ::ze_device_image_properties_t.maxImageDims2D for limits. - ///< ::ZE_IMAGE_TYPE_3D: height in pixels; see - ///< ::ze_device_image_properties_t.maxImageDims3D for limits. - ///< other: ignored. - uint32_t depth; ///< [in] depth dimension. - ///< ::ZE_IMAGE_TYPE_3D: depth in pixels; see - ///< ::ze_device_image_properties_t.maxImageDims3D for limits. - ///< other: ignored. - uint32_t arraylevels; ///< [in] array levels. - ///< ::ZE_IMAGE_TYPE_1DARRAY, ::ZE_IMAGE_TYPE_2DARRAY: see - ///< ::ze_device_image_properties_t.maxImageArraySlices for limits. - ///< other: ignored. - uint32_t miplevels; ///< [in] mipmap levels (must be 0) - -} ze_image_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported sampler filtering flags -typedef uint32_t ze_image_sampler_filter_flags_t; -typedef enum _ze_image_sampler_filter_flag_t -{ - ZE_IMAGE_SAMPLER_FILTER_FLAG_POINT = ZE_BIT(0), ///< device supports point filtering - ZE_IMAGE_SAMPLER_FILTER_FLAG_LINEAR = ZE_BIT(1),///< device supports linear filtering - ZE_IMAGE_SAMPLER_FILTER_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_image_sampler_filter_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Image properties -typedef struct _ze_image_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_image_sampler_filter_flags_t samplerFilterFlags; ///< [out] supported sampler filtering. - ///< returns 0 (unsupported) or a combination of ::ze_image_sampler_filter_flag_t. - -} ze_image_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves supported properties of an image. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == pImageProperties` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x3 < desc->flags` -/// + `::ZE_IMAGE_TYPE_BUFFER < desc->type` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeImageGetProperties( - ze_device_handle_t hDevice, ///< [in] handle of the device - const ze_image_desc_t* desc, ///< [in] pointer to image descriptor - ze_image_properties_t* pImageProperties ///< [out] pointer to image properties - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates an image on the context. -/// -/// @details -/// - The application must only use the image for the device, or its -/// sub-devices, which was provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @remarks -/// _Analogues_ -/// - clCreateImage -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phImage` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x3 < desc->flags` -/// + `::ZE_IMAGE_TYPE_BUFFER < desc->type` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_IMAGE_FORMAT -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeImageCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device - const ze_image_desc_t* desc, ///< [in] pointer to image descriptor - ze_image_handle_t* phImage ///< [out] pointer to handle of image object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Deletes an image object. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the image before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this image. -/// - The application must **not** call this function from simultaneous -/// threads with the same image handle. -/// - The implementation of this function must be thread-safe. 
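To make the removed image descriptor and creation entry points concrete, here is a hedged sketch that fills ze_image_desc_t for a 2D RGBA8 UNORM image, checks support, and creates it. The ZE_STRUCTURE_TYPE_IMAGE_DESC and ZE_STRUCTURE_TYPE_IMAGE_PROPERTIES values are assumed from the structure-type enum earlier in ze_api.h.

/* Hedged sketch: create a 1024x768 RGBA8 UNORM 2D image. */
#include <level_zero/ze_api.h>

ze_result_t create_rgba8_image(ze_context_handle_t hContext,
                               ze_device_handle_t hDevice,
                               ze_image_handle_t* phImage)
{
    ze_image_desc_t desc = {
        .stype = ZE_STRUCTURE_TYPE_IMAGE_DESC, /* assumed: defined earlier in ze_api.h */
        .pNext = NULL,
        .flags = 0,                            /* default: read-only, cached */
        .type = ZE_IMAGE_TYPE_2D,
        .format = {
            .layout = ZE_IMAGE_FORMAT_LAYOUT_8_8_8_8,
            .type = ZE_IMAGE_FORMAT_TYPE_UNORM,
            .x = ZE_IMAGE_FORMAT_SWIZZLE_R,
            .y = ZE_IMAGE_FORMAT_SWIZZLE_G,
            .z = ZE_IMAGE_FORMAT_SWIZZLE_B,
            .w = ZE_IMAGE_FORMAT_SWIZZLE_A,
        },
        .width = 1024, .height = 768, .depth = 1, /* depth ignored for 2D */
        .arraylevels = 1,                          /* ignored for non-array types */
        .miplevels = 0,
    };

    /* Optional capability check before creating the image. */
    ze_image_properties_t props = {
        .stype = ZE_STRUCTURE_TYPE_IMAGE_PROPERTIES /* assumed */
    };
    ze_result_t r = zeImageGetProperties(hDevice, &desc, &props);
    if (r != ZE_RESULT_SUCCESS) return r;

    return zeImageCreate(hContext, hDevice, &desc, phImage);
}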
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hImage` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeImageDestroy( - ze_image_handle_t hImage ///< [in][release] handle of image object to destroy - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Memory -#if !defined(__GNUC__) -#pragma region memory -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported memory allocation flags -typedef uint32_t ze_device_mem_alloc_flags_t; -typedef enum _ze_device_mem_alloc_flag_t -{ - ZE_DEVICE_MEM_ALLOC_FLAG_BIAS_CACHED = ZE_BIT(0), ///< device should cache allocation - ZE_DEVICE_MEM_ALLOC_FLAG_BIAS_UNCACHED = ZE_BIT(1), ///< device should not cache allocation (UC) - ZE_DEVICE_MEM_ALLOC_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_mem_alloc_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device memory allocation descriptor -typedef struct _ze_device_mem_alloc_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_device_mem_alloc_flags_t flags; ///< [in] flags specifying additional allocation controls. - ///< must be 0 (default) or a valid combination of ::ze_device_mem_alloc_flag_t; - ///< default behavior may use implicit driver-based heuristics. - uint32_t ordinal; ///< [in] ordinal of the device's local memory to allocate from. - ///< must be less than the count returned from ::zeDeviceGetMemoryProperties. - -} ze_device_mem_alloc_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported host memory allocation flags -typedef uint32_t ze_host_mem_alloc_flags_t; -typedef enum _ze_host_mem_alloc_flag_t -{ - ZE_HOST_MEM_ALLOC_FLAG_BIAS_CACHED = ZE_BIT(0), ///< host should cache allocation - ZE_HOST_MEM_ALLOC_FLAG_BIAS_UNCACHED = ZE_BIT(1), ///< host should not cache allocation (UC) - ZE_HOST_MEM_ALLOC_FLAG_BIAS_WRITE_COMBINED = ZE_BIT(2), ///< host memory should be allocated write-combined (WC) - ZE_HOST_MEM_ALLOC_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_host_mem_alloc_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Host memory allocation descriptor -typedef struct _ze_host_mem_alloc_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_host_mem_alloc_flags_t flags; ///< [in] flags specifying additional allocation controls. - ///< must be 0 (default) or a valid combination of ::ze_host_mem_alloc_flag_t; - ///< default behavior may use implicit driver-based heuristics. - -} ze_host_mem_alloc_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Allocates shared memory on the context. -/// -/// @details -/// - Shared allocations share ownership between the host and one or more -/// devices. -/// - Shared allocations may optionally be associated with a device by -/// passing a handle to the device. -/// - Devices supporting only single-device shared access capabilities may -/// access shared memory associated with the device. 
-/// For these devices, ownership of the allocation is shared between the -/// host and the associated device only. -/// - Passing nullptr as the device handle does not associate the shared -/// allocation with any device. -/// For allocations with no associated device, ownership of the allocation -/// is shared between the host and all devices supporting cross-device -/// shared access capabilities. -/// - The application must only use the memory allocation for the context -/// and device, or its sub-devices, which was provided during allocation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == device_desc` -/// + `nullptr == host_desc` -/// + `nullptr == pptr` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x3 < device_desc->flags` -/// + `0x7 < host_desc->flags` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT -/// + Must be zero or a power-of-two -/// + `0 != (alignment & (alignment - 1))` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemAllocShared( - ze_context_handle_t hContext, ///< [in] handle of the context object - const ze_device_mem_alloc_desc_t* device_desc, ///< [in] pointer to device memory allocation descriptor - const ze_host_mem_alloc_desc_t* host_desc, ///< [in] pointer to host memory allocation descriptor - size_t size, ///< [in] size in bytes to allocate; must be less-than - ///< ::ze_device_properties_t.maxMemAllocSize. - size_t alignment, ///< [in] minimum alignment in bytes for the allocation; must be a power of - ///< two. - ze_device_handle_t hDevice, ///< [in][optional] device handle to associate with - void** pptr ///< [out] pointer to shared allocation - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Allocates device memory on the context. -/// -/// @details -/// - Device allocations are owned by a specific device. -/// - In general, a device allocation may only be accessed by the device -/// that owns it. -/// - The application must only use the memory allocation for the context -/// and device, or its sub-devices, which was provided during allocation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. 
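A short sketch of the zeMemAllocShared call whose declaration is deleted above. The structure-type constants are assumed from the enum earlier in ze_api.h; passing a NULL device handle would make the allocation cross-device rather than device-associated, per the details above.

/* Hedged sketch: allocate 1 MiB of shared (host- and device-accessible) memory. */
#include <level_zero/ze_api.h>

ze_result_t alloc_shared_1mib(ze_context_handle_t hContext,
                              ze_device_handle_t hDevice, /* may be NULL */
                              void** pptr)
{
    ze_device_mem_alloc_desc_t device_desc = {
        .stype = ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC, /* assumed */
        .pNext = NULL,
        .flags = 0,
        .ordinal = 0, /* first local-memory ordinal reported by the device */
    };
    ze_host_mem_alloc_desc_t host_desc = {
        .stype = ZE_STRUCTURE_TYPE_HOST_MEM_ALLOC_DESC, /* assumed */
        .pNext = NULL,
        .flags = 0,
    };
    return zeMemAllocShared(hContext, &device_desc, &host_desc,
                            1 << 20 /* size */, 64 /* alignment, power of two */,
                            hDevice, pptr);
}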
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == device_desc` -/// + `nullptr == pptr` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x3 < device_desc->flags` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT -/// + Must be zero or a power-of-two -/// + `0 != (alignment & (alignment - 1))` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemAllocDevice( - ze_context_handle_t hContext, ///< [in] handle of the context object - const ze_device_mem_alloc_desc_t* device_desc, ///< [in] pointer to device memory allocation descriptor - size_t size, ///< [in] size in bytes to allocate; must be less-than - ///< ::ze_device_properties_t.maxMemAllocSize. - size_t alignment, ///< [in] minimum alignment in bytes for the allocation; must be a power of - ///< two. - ze_device_handle_t hDevice, ///< [in] handle of the device - void** pptr ///< [out] pointer to device allocation - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Allocates host memory on the context. -/// -/// @details -/// - Host allocations are owned by the host process. -/// - Host allocations are accessible by the host and all devices within the -/// driver's context. -/// - Host allocations are frequently used as staging areas to transfer data -/// to or from devices. -/// - The application must only use the memory allocation for the context -/// which was provided during allocation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == host_desc` -/// + `nullptr == pptr` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x7 < host_desc->flags` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT -/// + Must be zero or a power-of-two -/// + `0 != (alignment & (alignment - 1))` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemAllocHost( - ze_context_handle_t hContext, ///< [in] handle of the context object - const ze_host_mem_alloc_desc_t* host_desc, ///< [in] pointer to host memory allocation descriptor - size_t size, ///< [in] size in bytes to allocate; must be less-than - ///< ::ze_device_properties_t.maxMemAllocSize. - size_t alignment, ///< [in] minimum alignment in bytes for the allocation; must be a power of - ///< two. - void** pptr ///< [out] pointer to host allocation - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Frees allocated host memory, device memory, or shared memory on the -/// context. 
-/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the memory before it is freed -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this memory -/// - The application must **not** call this function from simultaneous -/// threads with the same pointer. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemFree( - ze_context_handle_t hContext, ///< [in] handle of the context object - void* ptr ///< [in][release] pointer to memory to free - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Memory allocation type -typedef enum _ze_memory_type_t -{ - ZE_MEMORY_TYPE_UNKNOWN = 0, ///< the memory pointed to is of unknown type - ZE_MEMORY_TYPE_HOST = 1, ///< the memory pointed to is a host allocation - ZE_MEMORY_TYPE_DEVICE = 2, ///< the memory pointed to is a device allocation - ZE_MEMORY_TYPE_SHARED = 3, ///< the memory pointed to is a shared ownership allocation - ZE_MEMORY_TYPE_FORCE_UINT32 = 0x7fffffff - -} ze_memory_type_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Memory allocation properties queried using ::zeMemGetAllocProperties -typedef struct _ze_memory_allocation_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_memory_type_t type; ///< [out] type of allocated memory - uint64_t id; ///< [out] identifier for this allocation - uint64_t pageSize; ///< [out] page size used for allocation - -} ze_memory_allocation_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves attributes of a memory allocation -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The application may query attributes of a memory allocation unrelated -/// to the context. -/// When this occurs, the returned allocation type will be -/// ::ZE_MEMORY_TYPE_UNKNOWN, and the returned identifier and associated -/// device is unspecified. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// + `nullptr == pMemAllocProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemGetAllocProperties( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] memory pointer to query - ze_memory_allocation_properties_t* pMemAllocProperties, ///< [in,out] query result for memory allocation properties - ze_device_handle_t* phDevice ///< [out][optional] device associated with this allocation - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves the base address and/or size of an allocation -/// -/// @details -/// - The application may call this function from simultaneous threads. 
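The allocation, property-query, and free entry points removed above compose as follows. A hedged sketch; the structure-type value is assumed from earlier in ze_api.h and the printout is purely illustrative.

/* Hedged sketch: device allocation round-trip with a property query. */
#include <level_zero/ze_api.h>
#include <stdio.h>

ze_result_t device_alloc_roundtrip(ze_context_handle_t hContext,
                                   ze_device_handle_t hDevice, size_t size)
{
    ze_device_mem_alloc_desc_t desc = {
        .stype = ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC, /* assumed */
        .flags = 0, .ordinal = 0,
    };
    void* ptr = NULL;
    ze_result_t r = zeMemAllocDevice(hContext, &desc, size,
                                     0 /* alignment: zero or a power of two */,
                                     hDevice, &ptr);
    if (r != ZE_RESULT_SUCCESS) return r;

    ze_memory_allocation_properties_t props = {
        .stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES, /* assumed */
    };
    ze_device_handle_t hOwner = NULL;
    if (zeMemGetAllocProperties(hContext, ptr, &props, &hOwner) == ZE_RESULT_SUCCESS)
        printf("type=%d pageSize=%llu\n", (int)props.type,
               (unsigned long long)props.pageSize);

    return zeMemFree(hContext, ptr);
}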
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemGetAddressRange( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] memory pointer to query - void** pBase, ///< [in,out][optional] base address of the allocation - size_t* pSize ///< [in,out][optional] size of the allocation - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates an IPC memory handle for the specified allocation -/// -/// @details -/// - Takes a pointer to a device memory allocation and creates an IPC -/// memory handle for exporting it for use in another process. -/// - The pointer must be base pointer of the device memory allocation; i.e. -/// the value returned from ::zeMemAllocDevice. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// + `nullptr == pIpcHandle` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemGetIpcHandle( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] pointer to the device memory allocation - ze_ipc_mem_handle_t* pIpcHandle ///< [out] Returned IPC memory handle - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported IPC memory flags -typedef uint32_t ze_ipc_memory_flags_t; -typedef enum _ze_ipc_memory_flag_t -{ - ZE_IPC_MEMORY_FLAG_TBD = ZE_BIT(0), ///< reserved for future use - ZE_IPC_MEMORY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_ipc_memory_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Opens an IPC memory handle to retrieve a device pointer on the -/// context. -/// -/// @details -/// - Takes an IPC memory handle from a remote process and associates it -/// with a device pointer usable in this process. -/// - The device pointer in this process should not be freed with -/// ::zeMemFree, but rather with ::zeMemCloseIpcHandle. -/// - Multiple calls to this function with the same IPC handle will return -/// unique pointers. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < flags` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pptr` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemOpenIpcHandle( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device to associate with the IPC memory handle - ze_ipc_mem_handle_t handle, ///< [in] IPC memory handle - ze_ipc_memory_flags_t flags, ///< [in] flags controlling the operation. - ///< must be 0 (default) or a valid combination of ::ze_ipc_memory_flag_t. 
- void** pptr ///< [out] pointer to device allocation in this process - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Closes an IPC memory handle -/// -/// @details -/// - Closes an IPC memory handle by unmapping memory that was opened in -/// this process using ::zeMemOpenIpcHandle. -/// - The application must **not** call this function from simultaneous -/// threads with the same pointer. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeMemCloseIpcHandle( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr ///< [in][release] pointer to device allocation in this process - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Additional allocation descriptor for exporting external memory -/// -/// @details -/// - This structure may be passed to ::zeMemAllocDevice, via the `pNext` -/// member of ::ze_device_mem_alloc_desc_t, to indicate an exportable -/// memory allocation. -/// - This structure may be passed to ::zeImageCreate, via the `pNext` -/// member of ::ze_image_desc_t, to indicate an exportable image. -typedef struct _ze_external_memory_export_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_external_memory_type_flags_t flags; ///< [in] flags specifying memory export types for this allocation. - ///< must be 0 (default) or a valid combination of ::ze_external_memory_type_flags_t - -} ze_external_memory_export_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Additional allocation descriptor for importing external memory as a -/// file descriptor -/// -/// @details -/// - This structure may be passed to ::zeMemAllocDevice, via the `pNext` -/// member of ::ze_device_mem_alloc_desc_t, to import memory from a file -/// descriptor. -/// - This structure may be passed to ::zeImageCreate, via the `pNext` -/// member of ::ze_image_desc_t, to import memory from a file descriptor. -typedef struct _ze_external_memory_import_fd_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_external_memory_type_flags_t flags; ///< [in] flags specifying the memory import type for the file descriptor. - ///< must be 0 (default) or a valid combination of ::ze_external_memory_type_flags_t - int fd; ///< [in] the file descriptor handle to import - -} ze_external_memory_import_fd_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Exports an allocation as a file descriptor -/// -/// @details -/// - This structure may be passed to ::zeMemGetAllocProperties, via the -/// `pNext` member of ::ze_memory_allocation_properties_t, to export a -/// memory allocation as a file descriptor. -/// - This structure may be passed to ::zeImageGetProperties, via the -/// `pNext` member of ::ze_image_properties_t, to export an image as a -/// file descriptor. -/// - The requested memory export type must have been specified when the -/// allocation was made. 
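The IPC trio removed above (zeMemGetIpcHandle, zeMemOpenIpcHandle, zeMemCloseIpcHandle) is meant to be split across two processes. A hedged sketch; how the handle bytes travel between processes (pipe, socket, shared file) is left to the caller.

/* Hedged sketch: export a device allocation in one process, import it in another. */
#include <level_zero/ze_api.h>

/* Exporting process: ptr must be the base pointer returned by zeMemAllocDevice. */
ze_result_t export_alloc(ze_context_handle_t hContext, const void* ptr,
                         ze_ipc_mem_handle_t* pHandle)
{
    return zeMemGetIpcHandle(hContext, ptr, pHandle);
    /* The handle bytes are then sent to the peer process out-of-band. */
}

/* Importing process: map the peer allocation, use it, then close (not zeMemFree). */
ze_result_t use_imported(ze_context_handle_t hContext, ze_device_handle_t hDevice,
                         ze_ipc_mem_handle_t handle)
{
    void* ptr = NULL;
    ze_result_t r = zeMemOpenIpcHandle(hContext, hDevice, handle, 0 /* flags */, &ptr);
    if (r != ZE_RESULT_SUCCESS) return r;
    /* ... use ptr in command lists executing on hDevice ... */
    return zeMemCloseIpcHandle(hContext, ptr);
}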
-typedef struct _ze_external_memory_export_fd_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_external_memory_type_flags_t flags; ///< [in] flags specifying the memory export type for the file descriptor. - ///< must be 0 (default) or a valid combination of ::ze_external_memory_type_flags_t - int fd; ///< [out] the exported file descriptor handle representing the allocation. - -} ze_external_memory_export_fd_t; - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Module -#if !defined(__GNUC__) -#pragma region module -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported module creation input formats -typedef enum _ze_module_format_t -{ - ZE_MODULE_FORMAT_IL_SPIRV = 0, ///< Format is SPIRV IL format - ZE_MODULE_FORMAT_NATIVE = 1, ///< Format is device native format - ZE_MODULE_FORMAT_FORCE_UINT32 = 0x7fffffff - -} ze_module_format_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Specialization constants - User defined constants -typedef struct _ze_module_constants_t -{ - uint32_t numConstants; ///< [in] Number of specialization constants. - const uint32_t* pConstantIds; ///< [in][range(0, numConstants)] Array of IDs that is sized to - ///< numConstants. - const void** pConstantValues; ///< [in][range(0, numConstants)] Array of pointers to values that is sized - ///< to numConstants. - -} ze_module_constants_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Module descriptor -typedef struct _ze_module_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_module_format_t format; ///< [in] Module format passed in with pInputModule - size_t inputSize; ///< [in] size of input IL or ISA from pInputModule. - const uint8_t* pInputModule; ///< [in] pointer to IL or ISA - const char* pBuildFlags; ///< [in][optional] string containing compiler flags. Following options are supported. - ///< - "-ze-opt-disable" - ///< - Disable optimizations - ///< - "-ze-opt-greater-than-4GB-buffer-required" - ///< - Use 64-bit offset calculations for buffers. - ///< - "-ze-opt-large-register-file" - ///< - Increase number of registers available to threads. - ///< - "-ze-opt-has-buffer-offset-arg" - ///< - Extend stateless to stateful optimization to more - ///< cases with the use of additional offset (e.g. 64-bit - ///< pointer to binding table with 32-bit offset). - ///< - "-g" - ///< - Include debugging information. - const ze_module_constants_t* pConstants; ///< [in][optional] pointer to specialization constants. Valid only for - ///< SPIR-V input. This must be set to nullptr if no specialization - ///< constants are provided. - -} ze_module_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a module on the context. -/// -/// @details -/// - Compiles the module for execution on the device. -/// - The application must only use the module for the device, or its -/// sub-devices, which was provided during creation. -/// - The module can be copied to other devices and contexts within the same -/// driver instance by using ::zeModuleGetNativeBinary. -/// - A build log can optionally be returned to the caller. 
The caller is -/// responsible for destroying build log using ::zeModuleBuildLogDestroy. -/// - The module descriptor constants are only supported for SPIR-V -/// specialization constants. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == desc->pInputModule` -/// + `nullptr == phModule` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `::ZE_MODULE_FORMAT_NATIVE < desc->format` -/// - ::ZE_RESULT_ERROR_INVALID_NATIVE_BINARY -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `0 == desc->inputSize` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -/// - ::ZE_RESULT_ERROR_MODULE_BUILD_FAILURE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device - const ze_module_desc_t* desc, ///< [in] pointer to module descriptor - ze_module_handle_t* phModule, ///< [out] pointer to handle of module object created - ze_module_build_log_handle_t* phBuildLog ///< [out][optional] pointer to handle of module's build log. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys module -/// -/// @details -/// - The application must destroy all kernel and build log handles created -/// from the module before destroying the module itself. -/// - The application must ensure the device is not currently referencing -/// the module before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this module. -/// - The application must **not** call this function from simultaneous -/// threads with the same module handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleDestroy( - ze_module_handle_t hModule ///< [in][release] handle of the module - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Dynamically link modules together that share import/export linkage -/// dependencies. -/// -/// @details -/// - Modules support import and export linkage for functions and global -/// variables. -/// - Modules that have imports can be dynamically linked to export modules -/// that satisfy those import requirements. -/// - Modules can have both import and export linkages. -/// - Modules that do not have any imports or exports do not need to be -/// linked. -/// - Modules cannot be partially linked. All modules needed to satisfy all -/// import dependencies for a module must be passed in or -/// ::ZE_RESULT_ERROR_MODULE_LINK_FAILURE will returned. -/// - Modules with imports need to be linked before kernel objects can be -/// created from them. -/// - Modules will only be linked once. A module can be used in multiple -/// link calls if it has exports but it's imports will not be re-linked. 
-/// - Ambiguous dependencies, where multiple modules satisfy the import -/// dependencies for another module, is not allowed. -/// - ModuleGetNativeBinary can be called on any module regardless of -/// whether it is linked or not. -/// - A link log can optionally be returned to the caller. The caller is -/// responsible for destroying build log using ::zeModuleBuildLogDestroy. -/// - See SPIR-V specification for linkage details. -/// - The application must ensure the modules being linked were created on -/// the same context. -/// - The application may call this function from simultaneous threads as -/// long as the import modules being linked are not the same. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phModules` -/// - ::ZE_RESULT_ERROR_MODULE_LINK_FAILURE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleDynamicLink( - uint32_t numModules, ///< [in] number of modules to be linked pointed to by phModules. - ze_module_handle_t* phModules, ///< [in][range(0, numModules)] pointer to an array of modules to - ///< dynamically link together. - ze_module_build_log_handle_t* phLinkLog ///< [out][optional] pointer to handle of dynamic link log. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys module build log object -/// -/// @details -/// - The implementation of this function may immediately free all Host -/// allocations associated with this object. -/// - The application must **not** call this function from simultaneous -/// threads with the same build log handle. -/// - The implementation of this function should be lock-free. -/// - This function can be called before or after ::zeModuleDestroy for the -/// associated module. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModuleBuildLog` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleBuildLogDestroy( - ze_module_build_log_handle_t hModuleBuildLog ///< [in][release] handle of the module build log object. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieves text string for build log. -/// -/// @details -/// - The caller can pass nullptr for pBuildLog when querying only for size. -/// - The caller must provide memory for build log. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModuleBuildLog` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pSize` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleBuildLogGetString( - ze_module_build_log_handle_t hModuleBuildLog, ///< [in] handle of the module build log object. - size_t* pSize, ///< [in,out] size of build log string. - char* pBuildLog ///< [in,out][optional] pointer to null-terminated string of the log. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve native binary from Module. 
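The module declarations removed above describe a build flow that ZLUDA's Level Zero path used to rely on: compile SPIR-V with zeModuleCreate and, on failure, read the build log via the size-then-fetch pattern. A hedged sketch, with ZE_STRUCTURE_TYPE_MODULE_DESC assumed from earlier in ze_api.h and the SPIR-V blob supplied by the caller:

/* Hedged sketch: build a SPIR-V module and dump the build log on failure. */
#include <level_zero/ze_api.h>
#include <stdio.h>
#include <stdlib.h>

ze_result_t build_module(ze_context_handle_t hContext, ze_device_handle_t hDevice,
                         const uint8_t* spirv, size_t spirvSize,
                         ze_module_handle_t* phModule)
{
    ze_module_desc_t desc = {
        .stype = ZE_STRUCTURE_TYPE_MODULE_DESC, /* assumed: defined earlier in ze_api.h */
        .pNext = NULL,
        .format = ZE_MODULE_FORMAT_IL_SPIRV,
        .inputSize = spirvSize,
        .pInputModule = spirv,
        .pBuildFlags = "-ze-opt-large-register-file",
        .pConstants = NULL, /* no specialization constants */
    };
    ze_module_build_log_handle_t hLog = NULL;
    ze_result_t r = zeModuleCreate(hContext, hDevice, &desc, phModule, &hLog);
    if (r != ZE_RESULT_SUCCESS && hLog != NULL) {
        /* Two-call pattern: query the size, then fetch the text. */
        size_t len = 0;
        if (zeModuleBuildLogGetString(hLog, &len, NULL) == ZE_RESULT_SUCCESS && len > 0) {
            char* text = malloc(len);
            if (text && zeModuleBuildLogGetString(hLog, &len, text) == ZE_RESULT_SUCCESS)
                fprintf(stderr, "build log:\n%s\n", text);
            free(text);
        }
    }
    if (hLog) zeModuleBuildLogDestroy(hLog);
    return r;
}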
-/// -/// @details -/// - The native binary output can be cached to disk and new modules can be -/// later constructed from the cached copy. -/// - The native binary will retain debugging information that is associated -/// with a module. -/// - The caller can pass nullptr for pModuleNativeBinary when querying only -/// for size. -/// - The implementation will copy the native binary into a buffer supplied -/// by the caller. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pSize` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleGetNativeBinary( - ze_module_handle_t hModule, ///< [in] handle of the module - size_t* pSize, ///< [in,out] size of native binary in bytes. - uint8_t* pModuleNativeBinary ///< [in,out][optional] byte pointer to native binary - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve global variable pointer from Module. -/// -/// @details -/// - The application may query global pointer from any module that either -/// exports or imports it. -/// - The application must dynamically link a module that imports a global -/// before the global pointer can be queried from it. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pGlobalName` -/// - ::ZE_RESULT_ERROR_INVALID_GLOBAL_NAME -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleGetGlobalPointer( - ze_module_handle_t hModule, ///< [in] handle of the module - const char* pGlobalName, ///< [in] name of global variable in module - size_t* pSize, ///< [in,out][optional] size of global variable - void** pptr ///< [in,out][optional] device visible pointer - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve all kernel names in the module. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleGetKernelNames( - ze_module_handle_t hModule, ///< [in] handle of the module - uint32_t* pCount, ///< [in,out] pointer to the number of names. - ///< if count is zero, then the driver shall update the value with the - ///< total number of names available. - ///< if count is greater than the number of names available, then the - ///< driver shall update the value with the correct number of names available. - const char** pNames ///< [in,out][optional][range(0, *pCount)] array of names of functions. - ///< if count is less than the number of names available, then driver shall - ///< only retrieve that number of names. 
- ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported module property flags -typedef uint32_t ze_module_property_flags_t; -typedef enum _ze_module_property_flag_t -{ - ZE_MODULE_PROPERTY_FLAG_IMPORTS = ZE_BIT(0), ///< Module has imports (i.e. imported global variables and/or kernels). - ///< See ::zeModuleDynamicLink. - ZE_MODULE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_module_property_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Module properties -typedef struct _ze_module_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_module_property_flags_t flags; ///< [out] 0 (none) or a valid combination of ::ze_module_property_flag_t - -} ze_module_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve module properties. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pModuleProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleGetProperties( - ze_module_handle_t hModule, ///< [in] handle of the module - ze_module_properties_t* pModuleProperties ///< [in,out] query result for module properties. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported kernel creation flags -typedef uint32_t ze_kernel_flags_t; -typedef enum _ze_kernel_flag_t -{ - ZE_KERNEL_FLAG_FORCE_RESIDENCY = ZE_BIT(0), ///< force all device allocations to be resident during execution - ZE_KERNEL_FLAG_EXPLICIT_RESIDENCY = ZE_BIT(1), ///< application is responsible for all residency of device allocations. - ///< driver may disable implicit residency management. - ZE_KERNEL_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_kernel_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel descriptor -typedef struct _ze_kernel_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_kernel_flags_t flags; ///< [in] creation flags. - ///< must be 0 (default) or a valid combination of ::ze_kernel_flag_t; - ///< default behavior may use driver-based residency. - const char* pKernelName; ///< [in] null-terminated name of kernel in module - -} ze_kernel_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Create a kernel from the module. -/// -/// @details -/// - Modules that have unresolved imports need to be dynamically linked -/// before a kernel can be created from them. (See ::zeModuleDynamicLink) -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. 
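zeModuleGetKernelNames, whose signature closes just above, follows the same in/out pCount convention as the other enumeration calls in this header. A hedged sketch of the two-call pattern:

/* Hedged sketch: list every kernel name in a module (two-call pattern). */
#include <level_zero/ze_api.h>
#include <stdio.h>
#include <stdlib.h>

ze_result_t print_kernel_names(ze_module_handle_t hModule)
{
    uint32_t count = 0;
    ze_result_t r = zeModuleGetKernelNames(hModule, &count, NULL); /* query the count */
    if (r != ZE_RESULT_SUCCESS || count == 0) return r;

    const char** names = calloc(count, sizeof(*names));
    if (!names) return ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY;

    r = zeModuleGetKernelNames(hModule, &count, names); /* fetch the names */
    if (r == ZE_RESULT_SUCCESS)
        for (uint32_t i = 0; i < count; ++i)
            printf("kernel[%u] = %s\n", i, names[i]);

    free(names);
    return r;
}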
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == desc->pKernelName` -/// + `nullptr == phKernel` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x3 < desc->flags` -/// - ::ZE_RESULT_ERROR_INVALID_KERNEL_NAME -/// - ::ZE_RESULT_ERROR_INVALID_MODULE_UNLINKED -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelCreate( - ze_module_handle_t hModule, ///< [in] handle of the module - const ze_kernel_desc_t* desc, ///< [in] pointer to kernel descriptor - ze_kernel_handle_t* phKernel ///< [out] handle of the Function object - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys a kernel object -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the kernel before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this kernel. -/// - The application must **not** call this function from simultaneous -/// threads with the same kernel handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelDestroy( - ze_kernel_handle_t hKernel ///< [in][release] handle of the kernel object - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve a function pointer from a module by name -/// -/// @details -/// - The function pointer is unique for the device on which the module was -/// created. -/// - The function pointer is no longer valid if module is destroyed. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hModule` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pFunctionName` -/// + `nullptr == pfnFunction` -/// - ::ZE_RESULT_ERROR_INVALID_FUNCTION_NAME -ZE_APIEXPORT ze_result_t ZE_APICALL -zeModuleGetFunctionPointer( - ze_module_handle_t hModule, ///< [in] handle of the module - const char* pFunctionName, ///< [in] Name of function to retrieve function pointer for. - void** pfnFunction ///< [out] pointer to function. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Set group size for a kernel on the current Host thread. -/// -/// @details -/// - The group size will be used when a ::zeCommandListAppendLaunchKernel -/// variant is called. -/// - The application must **not** call this function from simultaneous -/// threads with the same kernel handle. -/// - The implementation of this function should be lock-free. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_GROUP_SIZE_DIMENSION -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSetGroupSize( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - uint32_t groupSizeX, ///< [in] group size for X dimension to use for this kernel - uint32_t groupSizeY, ///< [in] group size for Y dimension to use for this kernel - uint32_t groupSizeZ ///< [in] group size for Z dimension to use for this kernel - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Query a suggested group size for a kernel given a global size for each -/// dimension. -/// -/// @details -/// - This function ignores the group size that is set using -/// ::zeKernelSetGroupSize. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == groupSizeX` -/// + `nullptr == groupSizeY` -/// + `nullptr == groupSizeZ` -/// - ::ZE_RESULT_ERROR_INVALID_GLOBAL_WIDTH_DIMENSION -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSuggestGroupSize( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - uint32_t globalSizeX, ///< [in] global width for X dimension - uint32_t globalSizeY, ///< [in] global width for Y dimension - uint32_t globalSizeZ, ///< [in] global width for Z dimension - uint32_t* groupSizeX, ///< [out] recommended size of group for X dimension - uint32_t* groupSizeY, ///< [out] recommended size of group for Y dimension - uint32_t* groupSizeZ ///< [out] recommended size of group for Z dimension - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Query a suggested max group count for a cooperative kernel. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == totalGroupCount` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSuggestMaxCooperativeGroupCount( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - uint32_t* totalGroupCount ///< [out] recommended total group count. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Set kernel argument for a kernel on the current Host thread. -/// -/// @details -/// - The argument values will be used when a -/// ::zeCommandListAppendLaunchKernel variant is called. -/// - The application must **not** call this function from simultaneous -/// threads with the same kernel handle. -/// - The implementation of this function should be lock-free. 
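
Continuing that sketch, the group-size and argument-setting calls above are typically used together before a launch is recorded; dptr (a device allocation) and n (the element count) are hypothetical:

    uint32_t groupSizeX = 0, groupSizeY = 0, groupSizeZ = 0;
    zeKernelSuggestGroupSize(hKernel, n, 1, 1, &groupSizeX, &groupSizeY, &groupSizeZ);
    zeKernelSetGroupSize(hKernel, groupSizeX, groupSizeY, groupSizeZ);

    /* pointer arguments are passed via the address of the pointer variable itself */
    zeKernelSetArgumentValue(hKernel, 0, sizeof(dptr), &dptr);
    zeKernelSetArgumentValue(hKernel, 1, sizeof(n), &n);
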
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_INDEX -/// - ::ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_SIZE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSetArgumentValue( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - uint32_t argIndex, ///< [in] argument index in range [0, num args - 1] - size_t argSize, ///< [in] size of argument type - const void* pArgValue ///< [in][optional] argument value represented as matching arg type. If - ///< null then argument value is considered null. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel indirect access flags -typedef uint32_t ze_kernel_indirect_access_flags_t; -typedef enum _ze_kernel_indirect_access_flag_t -{ - ZE_KERNEL_INDIRECT_ACCESS_FLAG_HOST = ZE_BIT(0),///< Indicates that the kernel accesses host allocations indirectly. - ZE_KERNEL_INDIRECT_ACCESS_FLAG_DEVICE = ZE_BIT(1), ///< Indicates that the kernel accesses device allocations indirectly. - ZE_KERNEL_INDIRECT_ACCESS_FLAG_SHARED = ZE_BIT(2), ///< Indicates that the kernel accesses shared allocations indirectly. - ZE_KERNEL_INDIRECT_ACCESS_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_kernel_indirect_access_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Sets kernel indirect access flags. -/// -/// @details -/// - The application should specify which allocations will be indirectly -/// accessed by the kernel to allow driver to optimize which allocations -/// are made resident -/// - This function may **not** be called from simultaneous threads with the -/// same Kernel handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x7 < flags` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSetIndirectAccess( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - ze_kernel_indirect_access_flags_t flags ///< [in] kernel indirect access flags - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve kernel indirect access flags. -/// -/// @details -/// - This function may be called from simultaneous threads with the same -/// Kernel handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pFlags` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelGetIndirectAccess( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - ze_kernel_indirect_access_flags_t* pFlags ///< [out] query result for kernel indirect access flags. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve all declared kernel attributes (i.e. can be specified with -/// __attribute__ in runtime language). -/// -/// @details -/// - This function may be called from simultaneous threads with the same -/// Kernel handle. 
-/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pSize` -/// + `nullptr == pString` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelGetSourceAttributes( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - uint32_t* pSize, ///< [in,out] pointer to size of string in bytes. - char** pString ///< [in,out] pointer to null-terminated string, whose lifetime is tied to - ///< the kernel object, where kernel source attributes are separated by - ///< space. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported Cache Config flags -typedef uint32_t ze_cache_config_flags_t; -typedef enum _ze_cache_config_flag_t -{ - ZE_CACHE_CONFIG_FLAG_LARGE_SLM = ZE_BIT(0), ///< Large SLM size - ZE_CACHE_CONFIG_FLAG_LARGE_DATA = ZE_BIT(1), ///< Large General Data size - ZE_CACHE_CONFIG_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_cache_config_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Sets the preferred cache configuration for a kernel on the current -/// Host thread. -/// -/// @details -/// - The cache configuration will be used when a -/// ::zeCommandListAppendLaunchKernel variant is called. -/// - The application must **not** call this function from simultaneous -/// threads with the same kernel handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x3 < flags` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_FEATURE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSetCacheConfig( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - ze_cache_config_flags_t flags ///< [in] cache configuration. - ///< must be 0 (default configuration) or a valid combination of ::ze_cache_config_flag_t. - ); - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_KERNEL_UUID_SIZE -/// @brief Maximum kernel universal unique id (UUID) size in bytes -#define ZE_MAX_KERNEL_UUID_SIZE 16 -#endif // ZE_MAX_KERNEL_UUID_SIZE - -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MAX_MODULE_UUID_SIZE -/// @brief Maximum module universal unique id (UUID) size in bytes -#define ZE_MAX_MODULE_UUID_SIZE 16 -#endif // ZE_MAX_MODULE_UUID_SIZE - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel universal unique id (UUID) -typedef struct _ze_kernel_uuid_t -{ - uint8_t kid[ZE_MAX_KERNEL_UUID_SIZE]; ///< [out] opaque data representing a kernel UUID - uint8_t mid[ZE_MAX_MODULE_UUID_SIZE]; ///< [out] opaque data representing the kernel's module UUID - -} ze_kernel_uuid_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel properties -typedef struct _ze_kernel_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - uint32_t numKernelArgs; ///< [out] number of kernel arguments. 
- uint32_t requiredGroupSizeX; ///< [out] required group size in the X dimension, - ///< or zero if there is no required group size - uint32_t requiredGroupSizeY; ///< [out] required group size in the Y dimension, - ///< or zero if there is no required group size - uint32_t requiredGroupSizeZ; ///< [out] required group size in the Z dimension, - ///< or zero if there is no required group size - uint32_t requiredNumSubGroups; ///< [out] required number of subgroups per thread group, - ///< or zero if there is no required number of subgroups - uint32_t requiredSubgroupSize; ///< [out] required subgroup size, - ///< or zero if there is no required subgroup size - uint32_t maxSubgroupSize; ///< [out] maximum subgroup size - uint32_t maxNumSubgroups; ///< [out] maximum number of subgroups per thread group - uint32_t localMemSize; ///< [out] local memory size used by each thread group - uint32_t privateMemSize; ///< [out] private memory size allocated by compiler used by each thread - uint32_t spillMemSize; ///< [out] spill memory size allocated by compiler - ze_kernel_uuid_t uuid; ///< [out] universal unique identifier. - -} ze_kernel_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve kernel properties. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pKernelProperties` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelGetProperties( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - ze_kernel_properties_t* pKernelProperties ///< [in,out] query result for kernel properties. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Retrieve kernel name from Kernel. -/// -/// @details -/// - The caller can pass nullptr for pName when querying only for size. -/// - The implementation will copy the kernel name into a buffer supplied by -/// the caller. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pSize` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelGetName( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - size_t* pSize, ///< [in,out] size of kernel name string, including null terminator, in - ///< bytes. - char* pName ///< [in,out][optional] char pointer to kernel name. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Kernel dispatch group count. -typedef struct _ze_group_count_t -{ - uint32_t groupCountX; ///< [in] number of thread groups in X dimension - uint32_t groupCountY; ///< [in] number of thread groups in Y dimension - uint32_t groupCountZ; ///< [in] number of thread groups in Z dimension - -} ze_group_count_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Launch kernel over one or more work groups. 
-///
-/// @details
-///     - The application must ensure the kernel and events are accessible by
-///       the device on which the command list was created.
-///     - This may **only** be called for a command list created with command
-///       queue group ordinal that supports compute.
-///     - The application must ensure the command list, kernel and events were
-///       created on the same context.
-///     - This function may **not** be called from simultaneous threads with the
-///       same command list handle.
-///     - The implementation of this function should be lock-free.
-///
-/// @returns
-///     - ::ZE_RESULT_SUCCESS
-///     - ::ZE_RESULT_ERROR_UNINITIALIZED
-///     - ::ZE_RESULT_ERROR_DEVICE_LOST
-///     - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE
-///         + `nullptr == hCommandList`
-///         + `nullptr == hKernel`
-///     - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER
-///         + `nullptr == pLaunchFuncArgs`
-///     - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT
-///     - ::ZE_RESULT_ERROR_INVALID_SIZE
-///         + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`
-ZE_APIEXPORT ze_result_t ZE_APICALL
-zeCommandListAppendLaunchKernel(
-    ze_command_list_handle_t hCommandList,          ///< [in] handle of the command list
-    ze_kernel_handle_t hKernel,                     ///< [in] handle of the kernel object
-    const ze_group_count_t* pLaunchFuncArgs,        ///< [in] thread group launch arguments
-    ze_event_handle_t hSignalEvent,                 ///< [in][optional] handle of the event to signal on completion
-    uint32_t numWaitEvents,                         ///< [in][optional] number of events to wait on before launching; must be 0
-                                                    ///< if `nullptr == phWaitEvents`
-    ze_event_handle_t* phWaitEvents                 ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait
-                                                    ///< on before launching
-    );
-
-///////////////////////////////////////////////////////////////////////////////
-/// @brief Launch kernel cooperatively over one or more work groups.
-///
-/// @details
-///     - The application must ensure the kernel and events are accessible by
-///       the device on which the command list was created.
-///     - This may **only** be called for a command list created with command
-///       queue group ordinal that supports compute.
-///     - This may only be used for a command list that is submitted to a command
-///       queue with the cooperative flag set.
-///     - The application must ensure the command list, kernel and events were
-///       created on the same context.
-///     - This function may **not** be called from simultaneous threads with the
-///       same command list handle.
-///     - The implementation of this function should be lock-free.
-///     - Use ::zeKernelSuggestMaxCooperativeGroupCount to query the maximum group
-///       count the device supports for cooperative kernels.
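
A sketch of appending the plain launch declared above, continuing the earlier setup; hCommandList is assumed to have been created on a compute-capable command queue group:

    /* one thread group per groupSizeX elements, rounded up */
    ze_group_count_t launchArgs = { (n + groupSizeX - 1) / groupSizeX, 1, 1 };
    zeCommandListAppendLaunchKernel(hCommandList, hKernel, &launchArgs,
                                    NULL,       /* no signal event */
                                    0, NULL);   /* no wait events */
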
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// + `nullptr == hKernel` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pLaunchFuncArgs` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendLaunchCooperativeKernel( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - const ze_group_count_t* pLaunchFuncArgs, ///< [in] thread group launch arguments - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Launch kernel over one or more work groups using indirect arguments. -/// -/// @details -/// - The application must ensure the kernel and events are accessible by -/// the device on which the command list was created. -/// - The application must ensure the launch arguments are visible to the -/// device on which the command list was created. -/// - The implementation must not access the contents of the launch -/// arguments as they are free to be modified by either the Host or device -/// up until execution. -/// - This may **only** be called for a command list created with command -/// queue group ordinal that supports compute. -/// - The application must ensure the command list, kernel and events were -/// created, and the memory was allocated, on the same context. -/// - This function may **not** be called from simultaneous threads with the -/// same command list handle. -/// - The implementation of this function should be lock-free. 
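
For the cooperative variant, the notes above suggest sizing the launch from zeKernelSuggestMaxCooperativeGroupCount; this sketch assumes hCommandList is submitted to a command queue created with the cooperative flag:

    uint32_t maxGroupCount = 0;
    zeKernelSuggestMaxCooperativeGroupCount(hKernel, &maxGroupCount);

    ze_group_count_t coopArgs = { maxGroupCount, 1, 1 };
    zeCommandListAppendLaunchCooperativeKernel(hCommandList, hKernel, &coopArgs, NULL, 0, NULL);
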
-///
-/// @returns
-///     - ::ZE_RESULT_SUCCESS
-///     - ::ZE_RESULT_ERROR_UNINITIALIZED
-///     - ::ZE_RESULT_ERROR_DEVICE_LOST
-///     - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE
-///         + `nullptr == hCommandList`
-///         + `nullptr == hKernel`
-///     - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER
-///         + `nullptr == pLaunchArgumentsBuffer`
-///     - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT
-///     - ::ZE_RESULT_ERROR_INVALID_SIZE
-///         + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`
-ZE_APIEXPORT ze_result_t ZE_APICALL
-zeCommandListAppendLaunchKernelIndirect(
-    ze_command_list_handle_t hCommandList,          ///< [in] handle of the command list
-    ze_kernel_handle_t hKernel,                     ///< [in] handle of the kernel object
-    const ze_group_count_t* pLaunchArgumentsBuffer, ///< [in] pointer to device buffer that will contain thread group launch
-                                                    ///< arguments
-    ze_event_handle_t hSignalEvent,                 ///< [in][optional] handle of the event to signal on completion
-    uint32_t numWaitEvents,                         ///< [in][optional] number of events to wait on before launching; must be 0
-                                                    ///< if `nullptr == phWaitEvents`
-    ze_event_handle_t* phWaitEvents                 ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait
-                                                    ///< on before launching
-    );
-
-///////////////////////////////////////////////////////////////////////////////
-/// @brief Launch multiple kernels over one or more work groups using an array of
-///        indirect arguments.
-///
-/// @details
-///     - The application must ensure the kernel and events are accessible by
-///       the device on which the command list was created.
-///     - The application must ensure the array of launch arguments and count
-///       buffer are visible to the device on which the command list was
-///       created.
-///     - The implementation must not access the contents of the array of launch
-///       arguments or count buffer as they are free to be modified by either
-///       the Host or device up until execution.
-///     - This may **only** be called for a command list created with command
-///       queue group ordinal that supports compute.
-///     - The application must ensure the command list, kernel and events were
-///       created, and the memory was allocated, on the same context.
-///     - This function may **not** be called from simultaneous threads with the
-///       same command list handle.
-///     - The implementation of this function should be lock-free.
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hCommandList` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == phKernels` -/// + `nullptr == pCountBuffer` -/// + `nullptr == pLaunchArgumentsBuffer` -/// - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT -/// - ::ZE_RESULT_ERROR_INVALID_SIZE -/// + `(nullptr == phWaitEvents) && (0 < numWaitEvents)` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeCommandListAppendLaunchMultipleKernelsIndirect( - ze_command_list_handle_t hCommandList, ///< [in] handle of the command list - uint32_t numKernels, ///< [in] maximum number of kernels to launch - ze_kernel_handle_t* phKernels, ///< [in][range(0, numKernels)] handles of the kernel objects - const uint32_t* pCountBuffer, ///< [in] pointer to device memory location that will contain the actual - ///< number of kernels to launch; value must be less-than or equal-to - ///< numKernels - const ze_group_count_t* pLaunchArgumentsBuffer, ///< [in][range(0, numKernels)] pointer to device buffer that will contain - ///< a contiguous array of thread group launch arguments - ze_event_handle_t hSignalEvent, ///< [in][optional] handle of the event to signal on completion - uint32_t numWaitEvents, ///< [in][optional] number of events to wait on before launching; must be 0 - ///< if `nullptr == phWaitEvents` - ze_event_handle_t* phWaitEvents ///< [in][optional][range(0, numWaitEvents)] handle of the events to wait - ///< on before launching - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero Extension for supporting module programs. -#if !defined(__GNUC__) -#pragma region program -#endif -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_MODULE_PROGRAM_EXP_NAME -/// @brief Module Program Extension Name -#define ZE_MODULE_PROGRAM_EXP_NAME "ZE_experimental_module_program" -#endif // ZE_MODULE_PROGRAM_EXP_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Module Program Extension Version(s) -typedef enum _ze_module_program_exp_version_t -{ - ZE_MODULE_PROGRAM_EXP_VERSION_1_0 = ZE_MAKE_VERSION( 1, 0 ),///< version 1.0 - ZE_MODULE_PROGRAM_EXP_VERSION_CURRENT = ZE_MAKE_VERSION( 1, 0 ),///< latest known version - ZE_MODULE_PROGRAM_EXP_VERSION_FORCE_UINT32 = 0x7fffffff - -} ze_module_program_exp_version_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Module extended descriptor to support multiple input modules. -/// -/// @details -/// - Implementation must support ::ZE_experimental_module_program extension -/// - pInputModules, pBuildFlags, and pConstants from ::ze_module_desc_t is -/// ignored. -/// - Format in ::ze_module_desc_t needs to be set to -/// ::ZE_MODULE_FORMAT_IL_SPIRV. -typedef struct _ze_module_program_exp_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - uint32_t count; ///< [in] Count of input modules - const size_t* inputSizes; ///< [in][range(0, count)] sizes of each input IL module in pInputModules. - const uint8_t** pInputModules; ///< [in][range(0, count)] pointer to an array of IL (e.g. SPIR-V modules). - ///< Valid only for SPIR-V input. - const char** pBuildFlags; ///< [in][optional][range(0, count)] array of strings containing build - ///< flags. 
See pBuildFlags in ::ze_module_desc_t. - const ze_module_constants_t** pConstants; ///< [in][optional][range(0, count)] pointer to array of specialization - ///< constant strings. Valid only for SPIR-V input. This must be set to - ///< nullptr if no specialization constants are provided. - -} ze_module_program_exp_desc_t; - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero Extension APIs for Raytracing -#if !defined(__GNUC__) -#pragma region raytracing -#endif -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_RAYTRACING_EXT_NAME -/// @brief Raytracing Extension Name -#define ZE_RAYTRACING_EXT_NAME "ZE_extension_raytracing" -#endif // ZE_RAYTRACING_EXT_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Raytracing Extension Version(s) -typedef enum _ze_raytracing_ext_version_t -{ - ZE_RAYTRACING_EXT_VERSION_1_0 = ZE_MAKE_VERSION( 1, 0 ),///< version 1.0 - ZE_RAYTRACING_EXT_VERSION_CURRENT = ZE_MAKE_VERSION( 1, 0 ),///< latest known version - ZE_RAYTRACING_EXT_VERSION_FORCE_UINT32 = 0x7fffffff - -} ze_raytracing_ext_version_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported raytracing capability flags -typedef uint32_t ze_device_raytracing_ext_flags_t; -typedef enum _ze_device_raytracing_ext_flag_t -{ - ZE_DEVICE_RAYTRACING_EXT_FLAG_RAYQUERY = ZE_BIT(0), ///< Supports rayquery - ZE_DEVICE_RAYTRACING_EXT_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_raytracing_ext_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Raytracing properties queried using ::zeDeviceGetModuleProperties -/// -/// @details -/// - This structure may be returned from ::zeDeviceGetModuleProperties, via -/// `pNext` member of ::ze_device_module_properties_t. -typedef struct _ze_device_raytracing_ext_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_device_raytracing_ext_flags_t flags; ///< [out] 0 or a valid combination of ::ze_device_raytracing_ext_flags_t - uint32_t maxBVHLevels; ///< [out] Maximum number of BVH levels supported - -} ze_device_raytracing_ext_properties_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported raytracing memory allocation flags -typedef uint32_t ze_raytracing_mem_alloc_ext_flags_t; -typedef enum _ze_raytracing_mem_alloc_ext_flag_t -{ - ZE_RAYTRACING_MEM_ALLOC_EXT_FLAG_TBD = ZE_BIT(0), ///< reserved for future use - ZE_RAYTRACING_MEM_ALLOC_EXT_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_raytracing_mem_alloc_ext_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Raytracing memory allocation descriptor -/// -/// @details -/// - This structure must be passed to ::zeMemAllocShared or -/// ::zeMemAllocDevice, via `pNext` member of -/// ::ze_device_mem_alloc_desc_t, for any memory allocation that is to be -/// accessed by raytracing fixed-function of the device. -typedef struct _ze_raytracing_mem_alloc_ext_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_raytracing_mem_alloc_ext_flags_t flags; ///< [in] flags specifying additional allocation controls. 
- ///< must be 0 (default) or a valid combination of ::ze_raytracing_mem_alloc_ext_flag_t; - ///< default behavior may use implicit driver-based heuristics. - -} ze_raytracing_mem_alloc_ext_desc_t; - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Memory Residency -#if !defined(__GNUC__) -#pragma region residency -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Makes memory resident for the device. -/// -/// @details -/// - The application must ensure the memory is resident before being -/// referenced by the device -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextMakeMemoryResident( - ze_context_handle_t hContext, ///< [in] handle of context object - ze_device_handle_t hDevice, ///< [in] handle of the device - void* ptr, ///< [in] pointer to memory to make resident - size_t size ///< [in] size in bytes to make resident - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Allows memory to be evicted from the device. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the memory before it is evicted -/// - The application may free the memory without evicting; the memory is -/// implicitly evicted when freed. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextEvictMemory( - ze_context_handle_t hContext, ///< [in] handle of context object - ze_device_handle_t hDevice, ///< [in] handle of the device - void* ptr, ///< [in] pointer to memory to evict - size_t size ///< [in] size in bytes to evict - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Makes image resident for the device. -/// -/// @details -/// - The application must ensure the image is resident before being -/// referenced by the device -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. 
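
A sketch of the residency round-trip for a device allocation; hContext, hDevice, ptr and size are assumed to come from an earlier zeMemAllocDevice call:

    zeContextMakeMemoryResident(hContext, hDevice, ptr, size);
    /* ... submit work that reaches ptr only through indirect accesses ... */
    zeContextEvictMemory(hContext, hDevice, ptr, size);
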
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// + `nullptr == hImage` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextMakeImageResident( - ze_context_handle_t hContext, ///< [in] handle of context object - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_image_handle_t hImage ///< [in] handle of image to make resident - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Allows image to be evicted from the device. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the image before it is evicted -/// - The application may destroy the image without evicting; the image is -/// implicitly evicted when destroyed. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// + `nullptr == hImage` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeContextEvictImage( - ze_context_handle_t hContext, ///< [in] handle of context object - ze_device_handle_t hDevice, ///< [in] handle of the device - ze_image_handle_t hImage ///< [in] handle of image to make evict - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Sampler -#if !defined(__GNUC__) -#pragma region sampler -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Sampler addressing modes -typedef enum _ze_sampler_address_mode_t -{ - ZE_SAMPLER_ADDRESS_MODE_NONE = 0, ///< No coordinate modifications for out-of-bounds image access. - ZE_SAMPLER_ADDRESS_MODE_REPEAT = 1, ///< Out-of-bounds coordinates are wrapped back around. - ZE_SAMPLER_ADDRESS_MODE_CLAMP = 2, ///< Out-of-bounds coordinates are clamped to edge. - ZE_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, ///< Out-of-bounds coordinates are clamped to border color which is (0.0f, - ///< 0.0f, 0.0f, 0.0f) if image format swizzle contains alpha, otherwise - ///< (0.0f, 0.0f, 0.0f, 1.0f). - ZE_SAMPLER_ADDRESS_MODE_MIRROR = 4, ///< Out-of-bounds coordinates are mirrored starting from edge. - ZE_SAMPLER_ADDRESS_MODE_FORCE_UINT32 = 0x7fffffff - -} ze_sampler_address_mode_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Sampler filtering modes -typedef enum _ze_sampler_filter_mode_t -{ - ZE_SAMPLER_FILTER_MODE_NEAREST = 0, ///< No coordinate modifications for out of bounds image access. - ZE_SAMPLER_FILTER_MODE_LINEAR = 1, ///< Out-of-bounds coordinates are wrapped back around. 
- ZE_SAMPLER_FILTER_MODE_FORCE_UINT32 = 0x7fffffff - -} ze_sampler_filter_mode_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Sampler descriptor -typedef struct _ze_sampler_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_sampler_address_mode_t addressMode; ///< [in] Sampler addressing mode to determine how out-of-bounds - ///< coordinates are handled. - ze_sampler_filter_mode_t filterMode; ///< [in] Sampler filter mode to determine how samples are filtered. - ze_bool_t isNormalized; ///< [in] Are coordinates normalized [0, 1] or not. - -} ze_sampler_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates sampler on the context. -/// -/// @details -/// - The application must only use the sampler for the device, or its -/// sub-devices, which was provided during creation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phSampler` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `::ZE_SAMPLER_ADDRESS_MODE_MIRROR < desc->addressMode` -/// + `::ZE_SAMPLER_FILTER_MODE_LINEAR < desc->filterMode` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeSamplerCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device - const ze_sampler_desc_t* desc, ///< [in] pointer to sampler descriptor - ze_sampler_handle_t* phSampler ///< [out] handle of the sampler - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys sampler object -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the sampler before it is deleted. -/// - The implementation of this function may immediately free all Host and -/// Device allocations associated with this sampler. -/// - The application must **not** call this function from simultaneous -/// threads with the same sampler handle. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hSampler` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zeSamplerDestroy( - ze_sampler_handle_t hSampler ///< [in][release] handle of the sampler - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero APIs for Virtual Memory Management -#if !defined(__GNUC__) -#pragma region virtual -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Virtual memory page access attributes -typedef enum _ze_memory_access_attribute_t -{ - ZE_MEMORY_ACCESS_ATTRIBUTE_NONE = 0, ///< Indicates the memory page is inaccessible. - ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE = 1, ///< Indicates the memory page supports read write access. 
- ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY = 2, ///< Indicates the memory page supports read-only access. - ZE_MEMORY_ACCESS_ATTRIBUTE_FORCE_UINT32 = 0x7fffffff - -} ze_memory_access_attribute_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Reserves pages in virtual address space. -/// -/// @details -/// - The application must only use the memory allocation on the context for -/// which it was created. -/// - The starting address and size must be page aligned. See -/// ::zeVirtualMemQueryPageSize. -/// - If pStart is not null then implementation will attempt to reserve -/// starting from that address. If not available then will find another -/// suitable starting address. -/// - The application may call this function from simultaneous threads. -/// - The access attributes will default to none to indicate reservation is -/// inaccessible. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pStart` -/// + `nullptr == pptr` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemReserve( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* pStart, ///< [in] pointer to start of region to reserve. If nullptr then - ///< implementation will choose a start address. - size_t size, ///< [in] size in bytes to reserve; must be page aligned. - void** pptr ///< [out] pointer to virtual reservation. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Free pages in a reserved virtual address range. -/// -/// @details -/// - Any existing virtual mappings for the range will be unmapped. -/// - Physical allocations objects that were mapped to this range will not -/// be destroyed. These need to be destroyed explicitly. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemFree( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] pointer to start of region to free. - size_t size ///< [in] size in bytes to free; must be page aligned. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Queries page size to use for aligning virtual memory reservations and -/// physical memory allocations. -/// -/// @details -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == pagesize` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemQueryPageSize( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device object - size_t size, ///< [in] unaligned allocation size in bytes - size_t* pagesize ///< [out] pointer to page size to use for start address and size - ///< alignments. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported physical memory creation flags -typedef uint32_t ze_physical_mem_flags_t; -typedef enum _ze_physical_mem_flag_t -{ - ZE_PHYSICAL_MEM_FLAG_TBD = ZE_BIT(0), ///< reserved for future use. - ZE_PHYSICAL_MEM_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_physical_mem_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Physical memory descriptor -typedef struct _ze_physical_mem_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_physical_mem_flags_t flags; ///< [in] creation flags. - ///< must be 0 (default) or a valid combination of ::ze_physical_mem_flag_t. - size_t size; ///< [in] size in bytes to reserve; must be page aligned. - -} ze_physical_mem_desc_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Creates a physical memory object for the context. -/// -/// @details -/// - The application must only use the physical memory object on the -/// context for which it was created. -/// - The size must be page aligned. See ::zeVirtualMemQueryPageSize. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hDevice` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == desc` -/// + `nullptr == phPhysicalMemory` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `0x1 < desc->flags` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == desc->size` -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT -ZE_APIEXPORT ze_result_t ZE_APICALL -zePhysicalMemCreate( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_device_handle_t hDevice, ///< [in] handle of the device object - ze_physical_mem_desc_t* desc, ///< [in] pointer to physical memory descriptor. - ze_physical_mem_handle_t* phPhysicalMemory ///< [out] pointer to handle of physical memory object created - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Destroys a physical memory object. -/// -/// @details -/// - The application must ensure the device is not currently referencing -/// the physical memory object before it is deleted -/// - The application must **not** call this function from simultaneous -/// threads with the same physical memory handle. -/// - The implementation of this function must be thread-safe. 
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hPhysicalMemory` -/// - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE -ZE_APIEXPORT ze_result_t ZE_APICALL -zePhysicalMemDestroy( - ze_context_handle_t hContext, ///< [in] handle of the context object - ze_physical_mem_handle_t hPhysicalMemory ///< [in][release] handle of physical memory object to destroy - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Maps pages in virtual address space to pages from physical memory -/// object. -/// -/// @details -/// - The virtual address range must have been reserved using -/// ::zeVirtualMemReserve. -/// - The application must only use the mapped memory allocation on the -/// context for which it was created. -/// - The virtual start address and size must be page aligned. See -/// ::zeVirtualMemQueryPageSize. -/// - The application should use, for the starting address and size, the -/// same size alignment used for the physical allocation. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// + `nullptr == hPhysicalMemory` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `::ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY < access` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemMap( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] pointer to start of virtual address range to map. - size_t size, ///< [in] size in bytes of virtual address range to map; must be page - ///< aligned. - ze_physical_mem_handle_t hPhysicalMemory, ///< [in] handle to physical memory object. - size_t offset, ///< [in] offset into physical memory allocation object; must be page - ///< aligned. - ze_memory_access_attribute_t access ///< [in] specifies page access attributes to apply to the virtual address - ///< range. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Unmaps pages in virtual address space from pages from a physical -/// memory object. -/// -/// @details -/// - The page access attributes for virtual address range will revert back -/// to none. -/// - The application may call this function from simultaneous threads. -/// - The implementation of this function must be thread-safe. 
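
Taken together, the virtual-memory entry points above support a reserve/commit pattern roughly like this sketch; hContext, hDevice and requestedBytes are assumed, and error handling is omitted:

    size_t pageSize = 0;
    zeVirtualMemQueryPageSize(hContext, hDevice, requestedBytes, &pageSize);
    size_t size = ((requestedBytes + pageSize - 1) / pageSize) * pageSize;  /* page-align the size */

    void* ptr = NULL;
    zeVirtualMemReserve(hContext, NULL, size, &ptr);            /* let the driver pick the start address */

    ze_physical_mem_desc_t physDesc = { ZE_STRUCTURE_TYPE_PHYSICAL_MEM_DESC, NULL, 0, size };
    ze_physical_mem_handle_t hPhysical = NULL;
    zePhysicalMemCreate(hContext, hDevice, &physDesc, &hPhysical);

    zeVirtualMemMap(hContext, ptr, size, hPhysical, 0, ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE);
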
-/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY -/// - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT - "Address must be page aligned" -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// + Size must be page aligned -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemUnmap( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] pointer to start of region to unmap. - size_t size ///< [in] size in bytes to unmap; must be page aligned. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Set memory access attributes for a virtual address range. -/// -/// @details -/// - This function may be called from simultaneous threads with the same -/// function handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// - ::ZE_RESULT_ERROR_INVALID_ENUMERATION -/// + `::ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY < access` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT - "Address must be page aligned" -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// + Size must be page aligned -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemSetAccessAttribute( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] pointer to start of reserved virtual address region. - size_t size, ///< [in] size in bytes; must be page aligned. - ze_memory_access_attribute_t access ///< [in] specifies page access attributes to apply to the virtual address - ///< range. - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Get memory access attribute for a virtual address range. -/// -/// @details -/// - If size and outSize are equal then the pages in the specified virtual -/// address range have the same access attributes. -/// - This function may be called from simultaneous threads with the same -/// function handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hContext` -/// - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER -/// + `nullptr == ptr` -/// + `nullptr == access` -/// + `nullptr == outSize` -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT - "Address must be page aligned" -/// - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE -/// + `0 == size` -/// + Size must be page aligned -ZE_APIEXPORT ze_result_t ZE_APICALL -zeVirtualMemGetAccessAttribute( - ze_context_handle_t hContext, ///< [in] handle of the context object - const void* ptr, ///< [in] pointer to start of virtual address region for query. - size_t size, ///< [in] size in bytes; must be page aligned. - ze_memory_access_attribute_t* access, ///< [out] query result for page access attribute. 
- size_t* outSize ///< [out] query result for size of virtual address range, starting at ptr, - ///< that shares same access attribute. - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero Extension APIs for Floating-Point Atomics -#if !defined(__GNUC__) -#pragma region floatAtomics -#endif -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_FLOAT_ATOMICS_EXT_NAME -/// @brief Floating-Point Atomics Extension Name -#define ZE_FLOAT_ATOMICS_EXT_NAME "ZE_extension_float_atomics" -#endif // ZE_FLOAT_ATOMICS_EXT_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Floating-Point Atomics Extension Version(s) -typedef enum _ze_float_atomics_ext_version_t -{ - ZE_FLOAT_ATOMICS_EXT_VERSION_1_0 = ZE_MAKE_VERSION( 1, 0 ), ///< version 1.0 - ZE_FLOAT_ATOMICS_EXT_VERSION_CURRENT = ZE_MAKE_VERSION( 1, 0 ), ///< latest known version - ZE_FLOAT_ATOMICS_EXT_VERSION_FORCE_UINT32 = 0x7fffffff - -} ze_float_atomics_ext_version_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported floating-point atomic capability flags -typedef uint32_t ze_device_fp_atomic_ext_flags_t; -typedef enum _ze_device_fp_atomic_ext_flag_t -{ - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_LOAD_STORE = ZE_BIT(0), ///< Supports atomic load, store, and exchange - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_ADD = ZE_BIT(1),///< Supports atomic add and subtract - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_MIN_MAX = ZE_BIT(2),///< Supports atomic min and max - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_LOCAL_LOAD_STORE = ZE_BIT(16), ///< Supports atomic load, store, and exchange - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_LOCAL_ADD = ZE_BIT(17),///< Supports atomic add and subtract - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_LOCAL_MIN_MAX = ZE_BIT(18),///< Supports atomic min and max - ZE_DEVICE_FP_ATOMIC_EXT_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_device_fp_atomic_ext_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Device floating-point atomic properties queried using -/// ::zeDeviceGetModuleProperties -/// -/// @details -/// - This structure may be returned from ::zeDeviceGetModuleProperties, via -/// `pNext` member of ::ze_device_module_properties_t. -typedef struct _ze_float_atomic_ext_properties_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - void* pNext; ///< [in,out][optional] pointer to extension-specific structure - ze_device_fp_atomic_ext_flags_t fp16Flags; ///< [out] Capabilities for half-precision floating-point atomic operations - ze_device_fp_atomic_ext_flags_t fp32Flags; ///< [out] Capabilities for single-precision floating-point atomic - ///< operations - ze_device_fp_atomic_ext_flags_t fp64Flags; ///< [out] Capabilities for double-precision floating-point atomic - ///< operations - -} ze_float_atomic_ext_properties_t; - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero Extension for supporting kernel global work offset. 
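
The floating-point atomics capability struct above is consumed through the usual pNext chaining; a sketch, assuming hDevice and that the corresponding ZE_STRUCTURE_TYPE_* constants are used for the stype fields:

    ze_float_atomic_ext_properties_t fpAtomicProps = {};
    fpAtomicProps.stype = ZE_STRUCTURE_TYPE_FLOAT_ATOMIC_EXT_PROPERTIES;

    ze_device_module_properties_t moduleProps = {};
    moduleProps.stype = ZE_STRUCTURE_TYPE_DEVICE_MODULE_PROPERTIES;
    moduleProps.pNext = &fpAtomicProps;

    zeDeviceGetModuleProperties(hDevice, &moduleProps);
    if (fpAtomicProps.fp64Flags & ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_ADD) {
        /* fp64 atomic add/sub in global memory is available */
    }
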
-#if !defined(__GNUC__) -#pragma region globaloffset -#endif -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_GLOBAL_OFFSET_EXP_NAME -/// @brief Global Offset Extension Name -#define ZE_GLOBAL_OFFSET_EXP_NAME "ZE_experimental_global_offset" -#endif // ZE_GLOBAL_OFFSET_EXP_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Global Offset Extension Version(s) -typedef enum _ze_global_offset_exp_version_t -{ - ZE_GLOBAL_OFFSET_EXP_VERSION_1_0 = ZE_MAKE_VERSION( 1, 0 ), ///< version 1.0 - ZE_GLOBAL_OFFSET_EXP_VERSION_CURRENT = ZE_MAKE_VERSION( 1, 0 ), ///< latest known version - ZE_GLOBAL_OFFSET_EXP_VERSION_FORCE_UINT32 = 0x7fffffff - -} ze_global_offset_exp_version_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Set global work offset for a kernel on the current Host thread. -/// -/// @details -/// - The global work offset will be used when -/// a ::zeCommandListAppendLaunchKernel() variant is called. -/// - The application must **not** call this function from simultaneous -/// threads with the same kernel handle. -/// - The implementation of this function should be lock-free. -/// -/// @returns -/// - ::ZE_RESULT_SUCCESS -/// - ::ZE_RESULT_ERROR_UNINITIALIZED -/// - ::ZE_RESULT_ERROR_DEVICE_LOST -/// - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE -/// + `nullptr == hKernel` -ZE_APIEXPORT ze_result_t ZE_APICALL -zeKernelSetGlobalOffsetExp( - ze_kernel_handle_t hKernel, ///< [in] handle of the kernel object - uint32_t offsetX, ///< [in] global offset for X dimension to use for this kernel - uint32_t offsetY, ///< [in] global offset for Y dimension to use for this kernel - uint32_t offsetZ ///< [in] global offset for Z dimension to use for this kernel - ); - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero Extension for supporting relaxed allocation limits. 
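For the zeKernelSetGlobalOffsetExp entry point removed above, the offset is latched on the kernel handle and consumed by the next launch that is appended. A minimal sketch, assuming the command list, kernel, and group count are created elsewhere (error handling trimmed):

#include <level_zero/ze_api.h>

/* Apply a global work offset, then record the launch that consumes it. */
static ze_result_t launch_with_offset(ze_command_list_handle_t hCmdList,
                                      ze_kernel_handle_t hKernel,
                                      const ze_group_count_t* pGroupCount)
{
    /* Per the notes above, do not call this from multiple threads with the same kernel handle. */
    ze_result_t r = zeKernelSetGlobalOffsetExp(hKernel, 16 /*X*/, 0 /*Y*/, 0 /*Z*/);
    if (r != ZE_RESULT_SUCCESS)
        return r;
    return zeCommandListAppendLaunchKernel(hCmdList, hKernel, pGroupCount,
                                           NULL, 0, NULL);
}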
-#if !defined(__GNUC__) -#pragma region relaxedAllocLimits -#endif -/////////////////////////////////////////////////////////////////////////////// -#ifndef ZE_RELAXED_ALLOCATION_LIMITS_EXP_NAME -/// @brief Relaxed Allocation Limits Extension Name -#define ZE_RELAXED_ALLOCATION_LIMITS_EXP_NAME "ZE_experimental_relaxed_allocation_limits" -#endif // ZE_RELAXED_ALLOCATION_LIMITS_EXP_NAME - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Relaxed Allocation Limits Extension Version(s) -typedef enum _ze_relaxed_allocation_limits_exp_version_t -{ - ZE_RELAXED_ALLOCATION_LIMITS_EXP_VERSION_1_0 = ZE_MAKE_VERSION( 1, 0 ), ///< version 1.0 - ZE_RELAXED_ALLOCATION_LIMITS_EXP_VERSION_CURRENT = ZE_MAKE_VERSION( 1, 0 ), ///< latest known version - ZE_RELAXED_ALLOCATION_LIMITS_EXP_VERSION_FORCE_UINT32 = 0x7fffffff - -} ze_relaxed_allocation_limits_exp_version_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Supported relaxed memory allocation flags -typedef uint32_t ze_relaxed_allocation_limits_exp_flags_t; -typedef enum _ze_relaxed_allocation_limits_exp_flag_t -{ - ZE_RELAXED_ALLOCATION_LIMITS_EXP_FLAG_MAX_SIZE = ZE_BIT(0), ///< Allocation size may exceed ::ze_device_properties_t.maxMemAllocSize - ZE_RELAXED_ALLOCATION_LIMITS_EXP_FLAG_FORCE_UINT32 = 0x7fffffff - -} ze_relaxed_allocation_limits_exp_flag_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Relaxed limits memory allocation descriptor -/// -/// @details -/// - This structure may be passed to ::zeMemAllocShared or -/// ::zeMemAllocDevice, via `pNext` member of -/// ::ze_device_mem_alloc_desc_t. -/// - This structure may also be passed to ::zeMemAllocHost, via `pNext` -/// member of ::ze_host_mem_alloc_desc_t. -typedef struct _ze_relaxed_allocation_limits_exp_desc_t -{ - ze_structure_type_t stype; ///< [in] type of this structure - const void* pNext; ///< [in][optional] pointer to extension-specific structure - ze_relaxed_allocation_limits_exp_flags_t flags; ///< [in] flags specifying allocation limits to relax. 
- ///< must be 0 (default) or a valid combination of ::ze_relaxed_allocation_limits_exp_flag_t; - -} ze_relaxed_allocation_limits_exp_desc_t; - -#if !defined(__GNUC__) -#pragma endregion -#endif -// Intel 'oneAPI' Level-Zero API Callbacks -#if !defined(__GNUC__) -#pragma region callbacks -#endif -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeInit -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_init_params_t -{ - ze_init_flags_t* pflags; -} ze_init_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeInit -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnInitCb_t)( - ze_init_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Global callback functions pointers -typedef struct _ze_global_callbacks_t -{ - ze_pfnInitCb_t pfnInitCb; -} ze_global_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDriverGet -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_driver_get_params_t -{ - uint32_t** ppCount; - ze_driver_handle_t** pphDrivers; -} ze_driver_get_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDriverGet -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDriverGetCb_t)( - ze_driver_get_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDriverGetApiVersion -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_driver_get_api_version_params_t -{ - ze_driver_handle_t* phDriver; - ze_api_version_t** pversion; -} ze_driver_get_api_version_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDriverGetApiVersion -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDriverGetApiVersionCb_t)( - ze_driver_get_api_version_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDriverGetProperties -/// @details 
Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_driver_get_properties_params_t -{ - ze_driver_handle_t* phDriver; - ze_driver_properties_t** ppDriverProperties; -} ze_driver_get_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDriverGetProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDriverGetPropertiesCb_t)( - ze_driver_get_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDriverGetIpcProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_driver_get_ipc_properties_params_t -{ - ze_driver_handle_t* phDriver; - ze_driver_ipc_properties_t** ppIpcProperties; -} ze_driver_get_ipc_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDriverGetIpcProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDriverGetIpcPropertiesCb_t)( - ze_driver_get_ipc_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDriverGetExtensionProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_driver_get_extension_properties_params_t -{ - ze_driver_handle_t* phDriver; - uint32_t** ppCount; - ze_driver_extension_properties_t** ppExtensionProperties; -} ze_driver_get_extension_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDriverGetExtensionProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDriverGetExtensionPropertiesCb_t)( - ze_driver_get_extension_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Driver callback functions pointers -typedef struct _ze_driver_callbacks_t -{ - ze_pfnDriverGetCb_t pfnGetCb; - ze_pfnDriverGetApiVersionCb_t pfnGetApiVersionCb; - ze_pfnDriverGetPropertiesCb_t pfnGetPropertiesCb; - ze_pfnDriverGetIpcPropertiesCb_t pfnGetIpcPropertiesCb; - ze_pfnDriverGetExtensionPropertiesCb_t pfnGetExtensionPropertiesCb; -} ze_driver_callbacks_t; - 
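The long run of *_params_t structs and ze_pfn*Cb_t typedefs that begins here, and continues through the rest of this hunk, exists so a tracing layer can hook each API call. Purely as an illustration of the callback shape, a hedged sketch of an epilogue-style handler for zeInit follows; actual registration goes through the experimental tracer API in zet_api.h, which is outside this header.

#include <stdio.h>
#include <level_zero/ze_api.h>

/* Handler matching ze_pfnInitCb_t: logs the flags and the call's result.
   A prologue handler could instead rewrite *params->pflags before zeInit runs. */
static void ZE_APICALL on_zeInit(ze_init_params_t* params,
                                 ze_result_t result,
                                 void* pTracerUserData,
                                 void** ppTracerInstanceUserData)
{
    (void)pTracerUserData;
    (void)ppTracerInstanceUserData;
    fprintf(stderr, "zeInit(flags=0x%x) -> %d\n",
            (unsigned)*params->pflags, (int)result);
}

/* Wiring the handler into the global callbacks table declared above. */
static ze_global_callbacks_t globalCbs = { .pfnInitCb = on_zeInit };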
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGet -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_params_t -{ - ze_driver_handle_t* phDriver; - uint32_t** ppCount; - ze_device_handle_t** pphDevices; -} ze_device_get_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGet -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetCb_t)( - ze_device_get_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetSubDevices -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_sub_devices_params_t -{ - ze_device_handle_t* phDevice; - uint32_t** ppCount; - ze_device_handle_t** pphSubdevices; -} ze_device_get_sub_devices_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetSubDevices -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetSubDevicesCb_t)( - ze_device_get_sub_devices_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_properties_t** ppDeviceProperties; -} ze_device_get_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetPropertiesCb_t)( - ze_device_get_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetComputeProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_compute_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_compute_properties_t** ppComputeProperties; -} 
ze_device_get_compute_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetComputeProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetComputePropertiesCb_t)( - ze_device_get_compute_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetModuleProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_module_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_module_properties_t** ppModuleProperties; -} ze_device_get_module_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetModuleProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetModulePropertiesCb_t)( - ze_device_get_module_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetCommandQueueGroupProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_command_queue_group_properties_params_t -{ - ze_device_handle_t* phDevice; - uint32_t** ppCount; - ze_command_queue_group_properties_t** ppCommandQueueGroupProperties; -} ze_device_get_command_queue_group_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetCommandQueueGroupProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetCommandQueueGroupPropertiesCb_t)( - ze_device_get_command_queue_group_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetMemoryProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_memory_properties_params_t -{ - ze_device_handle_t* phDevice; - uint32_t** ppCount; - ze_device_memory_properties_t** ppMemProperties; -} ze_device_get_memory_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for 
zeDeviceGetMemoryProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetMemoryPropertiesCb_t)( - ze_device_get_memory_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetMemoryAccessProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_memory_access_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_memory_access_properties_t** ppMemAccessProperties; -} ze_device_get_memory_access_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetMemoryAccessProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetMemoryAccessPropertiesCb_t)( - ze_device_get_memory_access_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetCacheProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_cache_properties_params_t -{ - ze_device_handle_t* phDevice; - uint32_t** ppCount; - ze_device_cache_properties_t** ppCacheProperties; -} ze_device_get_cache_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetCacheProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetCachePropertiesCb_t)( - ze_device_get_cache_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetImageProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_image_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_image_properties_t** ppImageProperties; -} ze_device_get_image_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetImageProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data 
-typedef void (ZE_APICALL *ze_pfnDeviceGetImagePropertiesCb_t)( - ze_device_get_image_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetExternalMemoryProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_external_memory_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_external_memory_properties_t** ppExternalMemoryProperties; -} ze_device_get_external_memory_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetExternalMemoryProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetExternalMemoryPropertiesCb_t)( - ze_device_get_external_memory_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetP2PProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_p2_p_properties_params_t -{ - ze_device_handle_t* phDevice; - ze_device_handle_t* phPeerDevice; - ze_device_p2p_properties_t** ppP2PProperties; -} ze_device_get_p2_p_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetP2PProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetP2PPropertiesCb_t)( - ze_device_get_p2_p_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceCanAccessPeer -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_can_access_peer_params_t -{ - ze_device_handle_t* phDevice; - ze_device_handle_t* phPeerDevice; - ze_bool_t** pvalue; -} ze_device_can_access_peer_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceCanAccessPeer -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceCanAccessPeerCb_t)( - ze_device_can_access_peer_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - 
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeDeviceGetStatus -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_device_get_status_params_t -{ - ze_device_handle_t* phDevice; -} ze_device_get_status_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeDeviceGetStatus -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnDeviceGetStatusCb_t)( - ze_device_get_status_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Device callback functions pointers -typedef struct _ze_device_callbacks_t -{ - ze_pfnDeviceGetCb_t pfnGetCb; - ze_pfnDeviceGetSubDevicesCb_t pfnGetSubDevicesCb; - ze_pfnDeviceGetPropertiesCb_t pfnGetPropertiesCb; - ze_pfnDeviceGetComputePropertiesCb_t pfnGetComputePropertiesCb; - ze_pfnDeviceGetModulePropertiesCb_t pfnGetModulePropertiesCb; - ze_pfnDeviceGetCommandQueueGroupPropertiesCb_t pfnGetCommandQueueGroupPropertiesCb; - ze_pfnDeviceGetMemoryPropertiesCb_t pfnGetMemoryPropertiesCb; - ze_pfnDeviceGetMemoryAccessPropertiesCb_t pfnGetMemoryAccessPropertiesCb; - ze_pfnDeviceGetCachePropertiesCb_t pfnGetCachePropertiesCb; - ze_pfnDeviceGetImagePropertiesCb_t pfnGetImagePropertiesCb; - ze_pfnDeviceGetExternalMemoryPropertiesCb_t pfnGetExternalMemoryPropertiesCb; - ze_pfnDeviceGetP2PPropertiesCb_t pfnGetP2PPropertiesCb; - ze_pfnDeviceCanAccessPeerCb_t pfnCanAccessPeerCb; - ze_pfnDeviceGetStatusCb_t pfnGetStatusCb; -} ze_device_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_create_params_t -{ - ze_driver_handle_t* phDriver; - const ze_context_desc_t** pdesc; - ze_context_handle_t** pphContext; -} ze_context_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextCreateCb_t)( - ze_context_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_destroy_params_t -{ - ze_context_handle_t* phContext; -} ze_context_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback 
function-pointer for zeContextDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextDestroyCb_t)( - ze_context_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextGetStatus -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_get_status_params_t -{ - ze_context_handle_t* phContext; -} ze_context_get_status_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextGetStatus -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextGetStatusCb_t)( - ze_context_get_status_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextSystemBarrier -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_system_barrier_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; -} ze_context_system_barrier_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextSystemBarrier -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextSystemBarrierCb_t)( - ze_context_system_barrier_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextMakeMemoryResident -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_make_memory_resident_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - void** pptr; - size_t* psize; -} ze_context_make_memory_resident_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextMakeMemoryResident -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextMakeMemoryResidentCb_t)( - ze_context_make_memory_resident_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - 
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextEvictMemory -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_evict_memory_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - void** pptr; - size_t* psize; -} ze_context_evict_memory_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextEvictMemory -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextEvictMemoryCb_t)( - ze_context_evict_memory_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextMakeImageResident -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_make_image_resident_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - ze_image_handle_t* phImage; -} ze_context_make_image_resident_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextMakeImageResident -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextMakeImageResidentCb_t)( - ze_context_make_image_resident_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeContextEvictImage -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_context_evict_image_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - ze_image_handle_t* phImage; -} ze_context_evict_image_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeContextEvictImage -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnContextEvictImageCb_t)( - ze_context_evict_image_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Context callback functions pointers -typedef struct _ze_context_callbacks_t -{ - ze_pfnContextCreateCb_t pfnCreateCb; - ze_pfnContextDestroyCb_t pfnDestroyCb; - ze_pfnContextGetStatusCb_t pfnGetStatusCb; - ze_pfnContextSystemBarrierCb_t pfnSystemBarrierCb; - 
ze_pfnContextMakeMemoryResidentCb_t pfnMakeMemoryResidentCb; - ze_pfnContextEvictMemoryCb_t pfnEvictMemoryCb; - ze_pfnContextMakeImageResidentCb_t pfnMakeImageResidentCb; - ze_pfnContextEvictImageCb_t pfnEvictImageCb; -} ze_context_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandQueueCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_queue_create_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - const ze_command_queue_desc_t** pdesc; - ze_command_queue_handle_t** pphCommandQueue; -} ze_command_queue_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandQueueCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandQueueCreateCb_t)( - ze_command_queue_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandQueueDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_queue_destroy_params_t -{ - ze_command_queue_handle_t* phCommandQueue; -} ze_command_queue_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandQueueDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandQueueDestroyCb_t)( - ze_command_queue_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandQueueExecuteCommandLists -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_queue_execute_command_lists_params_t -{ - ze_command_queue_handle_t* phCommandQueue; - uint32_t* pnumCommandLists; - ze_command_list_handle_t** pphCommandLists; - ze_fence_handle_t* phFence; -} ze_command_queue_execute_command_lists_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandQueueExecuteCommandLists -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandQueueExecuteCommandListsCb_t)( - ze_command_queue_execute_command_lists_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - 
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandQueueSynchronize -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_queue_synchronize_params_t -{ - ze_command_queue_handle_t* phCommandQueue; - uint64_t* ptimeout; -} ze_command_queue_synchronize_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandQueueSynchronize -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandQueueSynchronizeCb_t)( - ze_command_queue_synchronize_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of CommandQueue callback functions pointers -typedef struct _ze_command_queue_callbacks_t -{ - ze_pfnCommandQueueCreateCb_t pfnCreateCb; - ze_pfnCommandQueueDestroyCb_t pfnDestroyCb; - ze_pfnCommandQueueExecuteCommandListsCb_t pfnExecuteCommandListsCb; - ze_pfnCommandQueueSynchronizeCb_t pfnSynchronizeCb; -} ze_command_queue_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_create_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - const ze_command_list_desc_t** pdesc; - ze_command_list_handle_t** pphCommandList; -} ze_command_list_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListCreateCb_t)( - ze_command_list_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListCreateImmediate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_create_immediate_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - const ze_command_queue_desc_t** paltdesc; - ze_command_list_handle_t** pphCommandList; -} ze_command_list_create_immediate_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListCreateImmediate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void 
(ZE_APICALL *ze_pfnCommandListCreateImmediateCb_t)( - ze_command_list_create_immediate_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_destroy_params_t -{ - ze_command_list_handle_t* phCommandList; -} ze_command_list_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListDestroyCb_t)( - ze_command_list_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListClose -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_close_params_t -{ - ze_command_list_handle_t* phCommandList; -} ze_command_list_close_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListClose -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListCloseCb_t)( - ze_command_list_close_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListReset -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_reset_params_t -{ - ze_command_list_handle_t* phCommandList; -} ze_command_list_reset_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListReset -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListResetCb_t)( - ze_command_list_reset_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendWriteGlobalTimestamp -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_write_global_timestamp_params_t -{ - 
ze_command_list_handle_t* phCommandList; - uint64_t** pdstptr; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_write_global_timestamp_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendWriteGlobalTimestamp -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendWriteGlobalTimestampCb_t)( - ze_command_list_append_write_global_timestamp_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendBarrier -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_barrier_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_barrier_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendBarrier -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendBarrierCb_t)( - ze_command_list_append_barrier_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemoryRangesBarrier -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_memory_ranges_barrier_params_t -{ - ze_command_list_handle_t* phCommandList; - uint32_t* pnumRanges; - const size_t** ppRangeSizes; - const void*** ppRanges; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_memory_ranges_barrier_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendMemoryRangesBarrier -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemoryRangesBarrierCb_t)( - ze_command_list_append_memory_ranges_barrier_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemoryCopy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify 
the parameter's value -typedef struct _ze_command_list_append_memory_copy_params_t -{ - ze_command_list_handle_t* phCommandList; - void** pdstptr; - const void** psrcptr; - size_t* psize; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_memory_copy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendMemoryCopy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemoryCopyCb_t)( - ze_command_list_append_memory_copy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemoryFill -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_memory_fill_params_t -{ - ze_command_list_handle_t* phCommandList; - void** pptr; - const void** ppattern; - size_t* ppattern_size; - size_t* psize; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_memory_fill_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendMemoryFill -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemoryFillCb_t)( - ze_command_list_append_memory_fill_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemoryCopyRegion -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_memory_copy_region_params_t -{ - ze_command_list_handle_t* phCommandList; - void** pdstptr; - const ze_copy_region_t** pdstRegion; - uint32_t* pdstPitch; - uint32_t* pdstSlicePitch; - const void** psrcptr; - const ze_copy_region_t** psrcRegion; - uint32_t* psrcPitch; - uint32_t* psrcSlicePitch; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_memory_copy_region_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendMemoryCopyRegion -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemoryCopyRegionCb_t)( - ze_command_list_append_memory_copy_region_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** 
ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemoryCopyFromContext -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_memory_copy_from_context_params_t -{ - ze_command_list_handle_t* phCommandList; - void** pdstptr; - ze_context_handle_t* phContextSrc; - const void** psrcptr; - size_t* psize; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_memory_copy_from_context_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendMemoryCopyFromContext -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemoryCopyFromContextCb_t)( - ze_command_list_append_memory_copy_from_context_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendImageCopy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_image_copy_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_image_handle_t* phDstImage; - ze_image_handle_t* phSrcImage; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_image_copy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendImageCopy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendImageCopyCb_t)( - ze_command_list_append_image_copy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendImageCopyRegion -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_image_copy_region_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_image_handle_t* phDstImage; - ze_image_handle_t* phSrcImage; - const ze_image_region_t** ppDstRegion; - const ze_image_region_t** ppSrcRegion; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_image_copy_region_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendImageCopyRegion -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// 
@param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendImageCopyRegionCb_t)( - ze_command_list_append_image_copy_region_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendImageCopyToMemory -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_image_copy_to_memory_params_t -{ - ze_command_list_handle_t* phCommandList; - void** pdstptr; - ze_image_handle_t* phSrcImage; - const ze_image_region_t** ppSrcRegion; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_image_copy_to_memory_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendImageCopyToMemory -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendImageCopyToMemoryCb_t)( - ze_command_list_append_image_copy_to_memory_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendImageCopyFromMemory -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_image_copy_from_memory_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_image_handle_t* phDstImage; - const void** psrcptr; - const ze_image_region_t** ppDstRegion; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_image_copy_from_memory_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendImageCopyFromMemory -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendImageCopyFromMemoryCb_t)( - ze_command_list_append_image_copy_from_memory_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemoryPrefetch -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_memory_prefetch_params_t -{ - ze_command_list_handle_t* phCommandList; - const void** pptr; - size_t* psize; -} ze_command_list_append_memory_prefetch_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// 
@brief Callback function-pointer for zeCommandListAppendMemoryPrefetch -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemoryPrefetchCb_t)( - ze_command_list_append_memory_prefetch_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendMemAdvise -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_mem_advise_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_device_handle_t* phDevice; - const void** pptr; - size_t* psize; - ze_memory_advice_t* padvice; -} ze_command_list_append_mem_advise_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendMemAdvise -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendMemAdviseCb_t)( - ze_command_list_append_mem_advise_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendSignalEvent -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_signal_event_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_event_handle_t* phEvent; -} ze_command_list_append_signal_event_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendSignalEvent -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendSignalEventCb_t)( - ze_command_list_append_signal_event_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendWaitOnEvents -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_wait_on_events_params_t -{ - ze_command_list_handle_t* phCommandList; - uint32_t* pnumEvents; - ze_event_handle_t** pphEvents; -} ze_command_list_append_wait_on_events_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendWaitOnEvents -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] 
pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendWaitOnEventsCb_t)( - ze_command_list_append_wait_on_events_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendEventReset -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_event_reset_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_event_handle_t* phEvent; -} ze_command_list_append_event_reset_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendEventReset -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendEventResetCb_t)( - ze_command_list_append_event_reset_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendQueryKernelTimestamps -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_query_kernel_timestamps_params_t -{ - ze_command_list_handle_t* phCommandList; - uint32_t* pnumEvents; - ze_event_handle_t** pphEvents; - void** pdstptr; - const size_t** ppOffsets; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_query_kernel_timestamps_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendQueryKernelTimestamps -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendQueryKernelTimestampsCb_t)( - ze_command_list_append_query_kernel_timestamps_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendLaunchKernel -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_launch_kernel_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_kernel_handle_t* phKernel; - const ze_group_count_t** ppLaunchFuncArgs; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_launch_kernel_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendLaunchKernel -/// 
@param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendLaunchKernelCb_t)( - ze_command_list_append_launch_kernel_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendLaunchCooperativeKernel -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_launch_cooperative_kernel_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_kernel_handle_t* phKernel; - const ze_group_count_t** ppLaunchFuncArgs; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_launch_cooperative_kernel_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendLaunchCooperativeKernel -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendLaunchCooperativeKernelCb_t)( - ze_command_list_append_launch_cooperative_kernel_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendLaunchKernelIndirect -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_launch_kernel_indirect_params_t -{ - ze_command_list_handle_t* phCommandList; - ze_kernel_handle_t* phKernel; - const ze_group_count_t** ppLaunchArgumentsBuffer; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_launch_kernel_indirect_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendLaunchKernelIndirect -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendLaunchKernelIndirectCb_t)( - ze_command_list_append_launch_kernel_indirect_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeCommandListAppendLaunchMultipleKernelsIndirect -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_command_list_append_launch_multiple_kernels_indirect_params_t -{ - ze_command_list_handle_t* phCommandList; - uint32_t* pnumKernels; - ze_kernel_handle_t** 
pphKernels; - const uint32_t** ppCountBuffer; - const ze_group_count_t** ppLaunchArgumentsBuffer; - ze_event_handle_t* phSignalEvent; - uint32_t* pnumWaitEvents; - ze_event_handle_t** pphWaitEvents; -} ze_command_list_append_launch_multiple_kernels_indirect_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeCommandListAppendLaunchMultipleKernelsIndirect -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnCommandListAppendLaunchMultipleKernelsIndirectCb_t)( - ze_command_list_append_launch_multiple_kernels_indirect_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of CommandList callback functions pointers -typedef struct _ze_command_list_callbacks_t -{ - ze_pfnCommandListCreateCb_t pfnCreateCb; - ze_pfnCommandListCreateImmediateCb_t pfnCreateImmediateCb; - ze_pfnCommandListDestroyCb_t pfnDestroyCb; - ze_pfnCommandListCloseCb_t pfnCloseCb; - ze_pfnCommandListResetCb_t pfnResetCb; - ze_pfnCommandListAppendWriteGlobalTimestampCb_t pfnAppendWriteGlobalTimestampCb; - ze_pfnCommandListAppendBarrierCb_t pfnAppendBarrierCb; - ze_pfnCommandListAppendMemoryRangesBarrierCb_t pfnAppendMemoryRangesBarrierCb; - ze_pfnCommandListAppendMemoryCopyCb_t pfnAppendMemoryCopyCb; - ze_pfnCommandListAppendMemoryFillCb_t pfnAppendMemoryFillCb; - ze_pfnCommandListAppendMemoryCopyRegionCb_t pfnAppendMemoryCopyRegionCb; - ze_pfnCommandListAppendMemoryCopyFromContextCb_t pfnAppendMemoryCopyFromContextCb; - ze_pfnCommandListAppendImageCopyCb_t pfnAppendImageCopyCb; - ze_pfnCommandListAppendImageCopyRegionCb_t pfnAppendImageCopyRegionCb; - ze_pfnCommandListAppendImageCopyToMemoryCb_t pfnAppendImageCopyToMemoryCb; - ze_pfnCommandListAppendImageCopyFromMemoryCb_t pfnAppendImageCopyFromMemoryCb; - ze_pfnCommandListAppendMemoryPrefetchCb_t pfnAppendMemoryPrefetchCb; - ze_pfnCommandListAppendMemAdviseCb_t pfnAppendMemAdviseCb; - ze_pfnCommandListAppendSignalEventCb_t pfnAppendSignalEventCb; - ze_pfnCommandListAppendWaitOnEventsCb_t pfnAppendWaitOnEventsCb; - ze_pfnCommandListAppendEventResetCb_t pfnAppendEventResetCb; - ze_pfnCommandListAppendQueryKernelTimestampsCb_t pfnAppendQueryKernelTimestampsCb; - ze_pfnCommandListAppendLaunchKernelCb_t pfnAppendLaunchKernelCb; - ze_pfnCommandListAppendLaunchCooperativeKernelCb_t pfnAppendLaunchCooperativeKernelCb; - ze_pfnCommandListAppendLaunchKernelIndirectCb_t pfnAppendLaunchKernelIndirectCb; - ze_pfnCommandListAppendLaunchMultipleKernelsIndirectCb_t pfnAppendLaunchMultipleKernelsIndirectCb; -} ze_command_list_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeFenceCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_fence_create_params_t -{ - ze_command_queue_handle_t* phCommandQueue; - const ze_fence_desc_t** pdesc; - ze_fence_handle_t** pphFence; -} ze_fence_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeFenceCreate -/// @param[in] 
params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnFenceCreateCb_t)( - ze_fence_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeFenceDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_fence_destroy_params_t -{ - ze_fence_handle_t* phFence; -} ze_fence_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeFenceDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnFenceDestroyCb_t)( - ze_fence_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeFenceHostSynchronize -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_fence_host_synchronize_params_t -{ - ze_fence_handle_t* phFence; - uint64_t* ptimeout; -} ze_fence_host_synchronize_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeFenceHostSynchronize -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnFenceHostSynchronizeCb_t)( - ze_fence_host_synchronize_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeFenceQueryStatus -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_fence_query_status_params_t -{ - ze_fence_handle_t* phFence; -} ze_fence_query_status_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeFenceQueryStatus -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnFenceQueryStatusCb_t)( - ze_fence_query_status_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeFenceReset -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback 
the ability to modify the parameter's value -typedef struct _ze_fence_reset_params_t -{ - ze_fence_handle_t* phFence; -} ze_fence_reset_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeFenceReset -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnFenceResetCb_t)( - ze_fence_reset_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Fence callback functions pointers -typedef struct _ze_fence_callbacks_t -{ - ze_pfnFenceCreateCb_t pfnCreateCb; - ze_pfnFenceDestroyCb_t pfnDestroyCb; - ze_pfnFenceHostSynchronizeCb_t pfnHostSynchronizeCb; - ze_pfnFenceQueryStatusCb_t pfnQueryStatusCb; - ze_pfnFenceResetCb_t pfnResetCb; -} ze_fence_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventPoolCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_pool_create_params_t -{ - ze_context_handle_t* phContext; - const ze_event_pool_desc_t** pdesc; - uint32_t* pnumDevices; - ze_device_handle_t** pphDevices; - ze_event_pool_handle_t** pphEventPool; -} ze_event_pool_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventPoolCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventPoolCreateCb_t)( - ze_event_pool_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventPoolDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_pool_destroy_params_t -{ - ze_event_pool_handle_t* phEventPool; -} ze_event_pool_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventPoolDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventPoolDestroyCb_t)( - ze_event_pool_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventPoolGetIpcHandle -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_pool_get_ipc_handle_params_t -{ - 
ze_event_pool_handle_t* phEventPool; - ze_ipc_event_pool_handle_t** pphIpc; -} ze_event_pool_get_ipc_handle_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventPoolGetIpcHandle -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventPoolGetIpcHandleCb_t)( - ze_event_pool_get_ipc_handle_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventPoolOpenIpcHandle -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_pool_open_ipc_handle_params_t -{ - ze_context_handle_t* phContext; - ze_ipc_event_pool_handle_t* phIpc; - ze_event_pool_handle_t** pphEventPool; -} ze_event_pool_open_ipc_handle_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventPoolOpenIpcHandle -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventPoolOpenIpcHandleCb_t)( - ze_event_pool_open_ipc_handle_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventPoolCloseIpcHandle -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_pool_close_ipc_handle_params_t -{ - ze_event_pool_handle_t* phEventPool; -} ze_event_pool_close_ipc_handle_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventPoolCloseIpcHandle -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventPoolCloseIpcHandleCb_t)( - ze_event_pool_close_ipc_handle_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of EventPool callback functions pointers -typedef struct _ze_event_pool_callbacks_t -{ - ze_pfnEventPoolCreateCb_t pfnCreateCb; - ze_pfnEventPoolDestroyCb_t pfnDestroyCb; - ze_pfnEventPoolGetIpcHandleCb_t pfnGetIpcHandleCb; - ze_pfnEventPoolOpenIpcHandleCb_t pfnOpenIpcHandleCb; - ze_pfnEventPoolCloseIpcHandleCb_t pfnCloseIpcHandleCb; -} ze_event_pool_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the 
parameter's value -typedef struct _ze_event_create_params_t -{ - ze_event_pool_handle_t* phEventPool; - const ze_event_desc_t** pdesc; - ze_event_handle_t** pphEvent; -} ze_event_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventCreateCb_t)( - ze_event_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_destroy_params_t -{ - ze_event_handle_t* phEvent; -} ze_event_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventDestroyCb_t)( - ze_event_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventHostSignal -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_host_signal_params_t -{ - ze_event_handle_t* phEvent; -} ze_event_host_signal_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventHostSignal -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventHostSignalCb_t)( - ze_event_host_signal_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventHostSynchronize -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_host_synchronize_params_t -{ - ze_event_handle_t* phEvent; - uint64_t* ptimeout; -} ze_event_host_synchronize_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventHostSynchronize -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventHostSynchronizeCb_t)( - 
ze_event_host_synchronize_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventQueryStatus -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_query_status_params_t -{ - ze_event_handle_t* phEvent; -} ze_event_query_status_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventQueryStatus -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventQueryStatusCb_t)( - ze_event_query_status_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventHostReset -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_host_reset_params_t -{ - ze_event_handle_t* phEvent; -} ze_event_host_reset_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventHostReset -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventHostResetCb_t)( - ze_event_host_reset_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeEventQueryKernelTimestamp -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_event_query_kernel_timestamp_params_t -{ - ze_event_handle_t* phEvent; - ze_kernel_timestamp_result_t** pdstptr; -} ze_event_query_kernel_timestamp_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeEventQueryKernelTimestamp -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnEventQueryKernelTimestampCb_t)( - ze_event_query_kernel_timestamp_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Event callback functions pointers -typedef struct _ze_event_callbacks_t -{ - ze_pfnEventCreateCb_t pfnCreateCb; - ze_pfnEventDestroyCb_t pfnDestroyCb; - ze_pfnEventHostSignalCb_t pfnHostSignalCb; - ze_pfnEventHostSynchronizeCb_t pfnHostSynchronizeCb; - ze_pfnEventQueryStatusCb_t pfnQueryStatusCb; - ze_pfnEventHostResetCb_t 
pfnHostResetCb; - ze_pfnEventQueryKernelTimestampCb_t pfnQueryKernelTimestampCb; -} ze_event_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeImageGetProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_image_get_properties_params_t -{ - ze_device_handle_t* phDevice; - const ze_image_desc_t** pdesc; - ze_image_properties_t** ppImageProperties; -} ze_image_get_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeImageGetProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnImageGetPropertiesCb_t)( - ze_image_get_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeImageCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_image_create_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - const ze_image_desc_t** pdesc; - ze_image_handle_t** pphImage; -} ze_image_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeImageCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnImageCreateCb_t)( - ze_image_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeImageDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_image_destroy_params_t -{ - ze_image_handle_t* phImage; -} ze_image_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeImageDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnImageDestroyCb_t)( - ze_image_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Image callback functions pointers -typedef struct _ze_image_callbacks_t -{ - ze_pfnImageGetPropertiesCb_t pfnGetPropertiesCb; - ze_pfnImageCreateCb_t pfnCreateCb; - ze_pfnImageDestroyCb_t pfnDestroyCb; -} ze_image_callbacks_t; - 
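/* Illustrative sketch (editorial, not part of the deleted header): how a
 * tracer callback built against the typedefs above can use the
 * pointer-to-parameter pattern.  Every member of a *_params_t struct points
 * at the caller's argument, so a prologue may inspect or rewrite it before
 * the real API call runs, and ppTracerInstanceUserData carries state to the
 * matching epilogue for the same call instance.  Assumes the conventional
 * <level_zero/ze_api.h> include; `pTracerUserData` is treated here as a
 * hypothetical per-tracer call counter owned by whoever registered the
 * tracer. */
#include <stdio.h>
#include <level_zero/ze_api.h>

static void ZE_APICALL onImageCreatePrologue(
    ze_image_create_params_t* params,
    ze_result_t result,               /* not yet meaningful in a prologue */
    void* pTracerUserData,            /* per-tracer state (hypothetical counter) */
    void** ppTracerInstanceUserData)  /* handed to the matching epilogue */
{
    (void)result;
    /* Read the caller's image descriptor through the pointer-to-parameter. */
    const ze_image_desc_t* desc = *params->pdesc;
    printf("zeImageCreate: %ux%u\n",
           (unsigned)desc->width, (unsigned)desc->height);

    /* Stash a per-call token for the epilogue to pick up. */
    unsigned* call_count = (unsigned*)pTracerUserData;
    *ppTracerInstanceUserData =
        call_count ? (void*)(size_t)(++*call_count) : NULL;
}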
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_create_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - const ze_module_desc_t** pdesc; - ze_module_handle_t** pphModule; - ze_module_build_log_handle_t** pphBuildLog; -} ze_module_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleCreateCb_t)( - ze_module_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_destroy_params_t -{ - ze_module_handle_t* phModule; -} ze_module_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleDestroyCb_t)( - ze_module_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleDynamicLink -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_dynamic_link_params_t -{ - uint32_t* pnumModules; - ze_module_handle_t** pphModules; - ze_module_build_log_handle_t** pphLinkLog; -} ze_module_dynamic_link_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleDynamicLink -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleDynamicLinkCb_t)( - ze_module_dynamic_link_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleGetNativeBinary -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_get_native_binary_params_t -{ - ze_module_handle_t* phModule; - size_t** ppSize; - uint8_t** ppModuleNativeBinary; -} 
ze_module_get_native_binary_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleGetNativeBinary -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleGetNativeBinaryCb_t)( - ze_module_get_native_binary_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleGetGlobalPointer -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_get_global_pointer_params_t -{ - ze_module_handle_t* phModule; - const char** ppGlobalName; - size_t** ppSize; - void*** ppptr; -} ze_module_get_global_pointer_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleGetGlobalPointer -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleGetGlobalPointerCb_t)( - ze_module_get_global_pointer_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleGetKernelNames -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_get_kernel_names_params_t -{ - ze_module_handle_t* phModule; - uint32_t** ppCount; - const char*** ppNames; -} ze_module_get_kernel_names_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleGetKernelNames -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleGetKernelNamesCb_t)( - ze_module_get_kernel_names_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleGetProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_get_properties_params_t -{ - ze_module_handle_t* phModule; - ze_module_properties_t** ppModuleProperties; -} ze_module_get_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleGetProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, 
Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleGetPropertiesCb_t)( - ze_module_get_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleGetFunctionPointer -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_get_function_pointer_params_t -{ - ze_module_handle_t* phModule; - const char** ppFunctionName; - void*** ppfnFunction; -} ze_module_get_function_pointer_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleGetFunctionPointer -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleGetFunctionPointerCb_t)( - ze_module_get_function_pointer_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Module callback functions pointers -typedef struct _ze_module_callbacks_t -{ - ze_pfnModuleCreateCb_t pfnCreateCb; - ze_pfnModuleDestroyCb_t pfnDestroyCb; - ze_pfnModuleDynamicLinkCb_t pfnDynamicLinkCb; - ze_pfnModuleGetNativeBinaryCb_t pfnGetNativeBinaryCb; - ze_pfnModuleGetGlobalPointerCb_t pfnGetGlobalPointerCb; - ze_pfnModuleGetKernelNamesCb_t pfnGetKernelNamesCb; - ze_pfnModuleGetPropertiesCb_t pfnGetPropertiesCb; - ze_pfnModuleGetFunctionPointerCb_t pfnGetFunctionPointerCb; -} ze_module_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleBuildLogDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_build_log_destroy_params_t -{ - ze_module_build_log_handle_t* phModuleBuildLog; -} ze_module_build_log_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeModuleBuildLogDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleBuildLogDestroyCb_t)( - ze_module_build_log_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeModuleBuildLogGetString -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_module_build_log_get_string_params_t -{ - ze_module_build_log_handle_t* phModuleBuildLog; - size_t** ppSize; - char** ppBuildLog; -} ze_module_build_log_get_string_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for 
zeModuleBuildLogGetString -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnModuleBuildLogGetStringCb_t)( - ze_module_build_log_get_string_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of ModuleBuildLog callback functions pointers -typedef struct _ze_module_build_log_callbacks_t -{ - ze_pfnModuleBuildLogDestroyCb_t pfnDestroyCb; - ze_pfnModuleBuildLogGetStringCb_t pfnGetStringCb; -} ze_module_build_log_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_create_params_t -{ - ze_module_handle_t* phModule; - const ze_kernel_desc_t** pdesc; - ze_kernel_handle_t** pphKernel; -} ze_kernel_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelCreateCb_t)( - ze_kernel_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_destroy_params_t -{ - ze_kernel_handle_t* phKernel; -} ze_kernel_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelDestroyCb_t)( - ze_kernel_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelSetCacheConfig -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_set_cache_config_params_t -{ - ze_kernel_handle_t* phKernel; - ze_cache_config_flags_t* pflags; -} ze_kernel_set_cache_config_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelSetCacheConfig -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData 
Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelSetCacheConfigCb_t)( - ze_kernel_set_cache_config_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelSetGroupSize -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_set_group_size_params_t -{ - ze_kernel_handle_t* phKernel; - uint32_t* pgroupSizeX; - uint32_t* pgroupSizeY; - uint32_t* pgroupSizeZ; -} ze_kernel_set_group_size_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelSetGroupSize -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelSetGroupSizeCb_t)( - ze_kernel_set_group_size_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelSuggestGroupSize -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_suggest_group_size_params_t -{ - ze_kernel_handle_t* phKernel; - uint32_t* pglobalSizeX; - uint32_t* pglobalSizeY; - uint32_t* pglobalSizeZ; - uint32_t** pgroupSizeX; - uint32_t** pgroupSizeY; - uint32_t** pgroupSizeZ; -} ze_kernel_suggest_group_size_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelSuggestGroupSize -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelSuggestGroupSizeCb_t)( - ze_kernel_suggest_group_size_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelSuggestMaxCooperativeGroupCount -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_suggest_max_cooperative_group_count_params_t -{ - ze_kernel_handle_t* phKernel; - uint32_t** ptotalGroupCount; -} ze_kernel_suggest_max_cooperative_group_count_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelSuggestMaxCooperativeGroupCount -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelSuggestMaxCooperativeGroupCountCb_t)( - ze_kernel_suggest_max_cooperative_group_count_params_t* params, - ze_result_t result, - void* pTracerUserData, - 
void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelSetArgumentValue -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_set_argument_value_params_t -{ - ze_kernel_handle_t* phKernel; - uint32_t* pargIndex; - size_t* pargSize; - const void** ppArgValue; -} ze_kernel_set_argument_value_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelSetArgumentValue -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelSetArgumentValueCb_t)( - ze_kernel_set_argument_value_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelSetIndirectAccess -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_set_indirect_access_params_t -{ - ze_kernel_handle_t* phKernel; - ze_kernel_indirect_access_flags_t* pflags; -} ze_kernel_set_indirect_access_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelSetIndirectAccess -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelSetIndirectAccessCb_t)( - ze_kernel_set_indirect_access_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelGetIndirectAccess -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_get_indirect_access_params_t -{ - ze_kernel_handle_t* phKernel; - ze_kernel_indirect_access_flags_t** ppFlags; -} ze_kernel_get_indirect_access_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelGetIndirectAccess -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelGetIndirectAccessCb_t)( - ze_kernel_get_indirect_access_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelGetSourceAttributes -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef 
struct _ze_kernel_get_source_attributes_params_t -{ - ze_kernel_handle_t* phKernel; - uint32_t** ppSize; - char*** ppString; -} ze_kernel_get_source_attributes_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelGetSourceAttributes -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelGetSourceAttributesCb_t)( - ze_kernel_get_source_attributes_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelGetProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_get_properties_params_t -{ - ze_kernel_handle_t* phKernel; - ze_kernel_properties_t** ppKernelProperties; -} ze_kernel_get_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelGetProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelGetPropertiesCb_t)( - ze_kernel_get_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeKernelGetName -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_kernel_get_name_params_t -{ - ze_kernel_handle_t* phKernel; - size_t** ppSize; - char** ppName; -} ze_kernel_get_name_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeKernelGetName -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnKernelGetNameCb_t)( - ze_kernel_get_name_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Kernel callback functions pointers -typedef struct _ze_kernel_callbacks_t -{ - ze_pfnKernelCreateCb_t pfnCreateCb; - ze_pfnKernelDestroyCb_t pfnDestroyCb; - ze_pfnKernelSetCacheConfigCb_t pfnSetCacheConfigCb; - ze_pfnKernelSetGroupSizeCb_t pfnSetGroupSizeCb; - ze_pfnKernelSuggestGroupSizeCb_t pfnSuggestGroupSizeCb; - ze_pfnKernelSuggestMaxCooperativeGroupCountCb_t pfnSuggestMaxCooperativeGroupCountCb; - ze_pfnKernelSetArgumentValueCb_t pfnSetArgumentValueCb; - ze_pfnKernelSetIndirectAccessCb_t pfnSetIndirectAccessCb; - ze_pfnKernelGetIndirectAccessCb_t pfnGetIndirectAccessCb; - ze_pfnKernelGetSourceAttributesCb_t pfnGetSourceAttributesCb; - 
ze_pfnKernelGetPropertiesCb_t pfnGetPropertiesCb; - ze_pfnKernelGetNameCb_t pfnGetNameCb; -} ze_kernel_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeSamplerCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_sampler_create_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - const ze_sampler_desc_t** pdesc; - ze_sampler_handle_t** pphSampler; -} ze_sampler_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeSamplerCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnSamplerCreateCb_t)( - ze_sampler_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeSamplerDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_sampler_destroy_params_t -{ - ze_sampler_handle_t* phSampler; -} ze_sampler_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeSamplerDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnSamplerDestroyCb_t)( - ze_sampler_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Sampler callback functions pointers -typedef struct _ze_sampler_callbacks_t -{ - ze_pfnSamplerCreateCb_t pfnCreateCb; - ze_pfnSamplerDestroyCb_t pfnDestroyCb; -} ze_sampler_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zePhysicalMemCreate -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_physical_mem_create_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - ze_physical_mem_desc_t** pdesc; - ze_physical_mem_handle_t** pphPhysicalMemory; -} ze_physical_mem_create_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zePhysicalMemCreate -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnPhysicalMemCreateCb_t)( - ze_physical_mem_create_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - 
-/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zePhysicalMemDestroy -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_physical_mem_destroy_params_t -{ - ze_context_handle_t* phContext; - ze_physical_mem_handle_t* phPhysicalMemory; -} ze_physical_mem_destroy_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zePhysicalMemDestroy -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnPhysicalMemDestroyCb_t)( - ze_physical_mem_destroy_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of PhysicalMem callback functions pointers -typedef struct _ze_physical_mem_callbacks_t -{ - ze_pfnPhysicalMemCreateCb_t pfnCreateCb; - ze_pfnPhysicalMemDestroyCb_t pfnDestroyCb; -} ze_physical_mem_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemAllocShared -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_alloc_shared_params_t -{ - ze_context_handle_t* phContext; - const ze_device_mem_alloc_desc_t** pdevice_desc; - const ze_host_mem_alloc_desc_t** phost_desc; - size_t* psize; - size_t* palignment; - ze_device_handle_t* phDevice; - void*** ppptr; -} ze_mem_alloc_shared_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemAllocShared -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemAllocSharedCb_t)( - ze_mem_alloc_shared_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemAllocDevice -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_alloc_device_params_t -{ - ze_context_handle_t* phContext; - const ze_device_mem_alloc_desc_t** pdevice_desc; - size_t* psize; - size_t* palignment; - ze_device_handle_t* phDevice; - void*** ppptr; -} ze_mem_alloc_device_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemAllocDevice -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemAllocDeviceCb_t)( - ze_mem_alloc_device_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** 
ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemAllocHost -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_alloc_host_params_t -{ - ze_context_handle_t* phContext; - const ze_host_mem_alloc_desc_t** phost_desc; - size_t* psize; - size_t* palignment; - void*** ppptr; -} ze_mem_alloc_host_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemAllocHost -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemAllocHostCb_t)( - ze_mem_alloc_host_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemFree -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_free_params_t -{ - ze_context_handle_t* phContext; - void** pptr; -} ze_mem_free_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemFree -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemFreeCb_t)( - ze_mem_free_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemGetAllocProperties -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_get_alloc_properties_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - ze_memory_allocation_properties_t** ppMemAllocProperties; - ze_device_handle_t** pphDevice; -} ze_mem_get_alloc_properties_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemGetAllocProperties -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemGetAllocPropertiesCb_t)( - ze_mem_get_alloc_properties_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemGetAddressRange -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_get_address_range_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - void*** 
ppBase; - size_t** ppSize; -} ze_mem_get_address_range_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemGetAddressRange -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemGetAddressRangeCb_t)( - ze_mem_get_address_range_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemGetIpcHandle -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_get_ipc_handle_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - ze_ipc_mem_handle_t** ppIpcHandle; -} ze_mem_get_ipc_handle_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemGetIpcHandle -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemGetIpcHandleCb_t)( - ze_mem_get_ipc_handle_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemOpenIpcHandle -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_open_ipc_handle_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - ze_ipc_mem_handle_t* phandle; - ze_ipc_memory_flags_t* pflags; - void*** ppptr; -} ze_mem_open_ipc_handle_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemOpenIpcHandle -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnMemOpenIpcHandleCb_t)( - ze_mem_open_ipc_handle_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeMemCloseIpcHandle -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_mem_close_ipc_handle_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; -} ze_mem_close_ipc_handle_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeMemCloseIpcHandle -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user 
data -typedef void (ZE_APICALL *ze_pfnMemCloseIpcHandleCb_t)( - ze_mem_close_ipc_handle_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of Mem callback functions pointers -typedef struct _ze_mem_callbacks_t -{ - ze_pfnMemAllocSharedCb_t pfnAllocSharedCb; - ze_pfnMemAllocDeviceCb_t pfnAllocDeviceCb; - ze_pfnMemAllocHostCb_t pfnAllocHostCb; - ze_pfnMemFreeCb_t pfnFreeCb; - ze_pfnMemGetAllocPropertiesCb_t pfnGetAllocPropertiesCb; - ze_pfnMemGetAddressRangeCb_t pfnGetAddressRangeCb; - ze_pfnMemGetIpcHandleCb_t pfnGetIpcHandleCb; - ze_pfnMemOpenIpcHandleCb_t pfnOpenIpcHandleCb; - ze_pfnMemCloseIpcHandleCb_t pfnCloseIpcHandleCb; -} ze_mem_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemReserve -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_reserve_params_t -{ - ze_context_handle_t* phContext; - const void** ppStart; - size_t* psize; - void*** ppptr; -} ze_virtual_mem_reserve_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemReserve -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemReserveCb_t)( - ze_virtual_mem_reserve_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemFree -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_free_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - size_t* psize; -} ze_virtual_mem_free_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemFree -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemFreeCb_t)( - ze_virtual_mem_free_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemQueryPageSize -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_query_page_size_params_t -{ - ze_context_handle_t* phContext; - ze_device_handle_t* phDevice; - size_t* psize; - size_t** ppagesize; -} ze_virtual_mem_query_page_size_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemQueryPageSize -/// @param[in] params Parameters passed to 
this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemQueryPageSizeCb_t)( - ze_virtual_mem_query_page_size_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemMap -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_map_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - size_t* psize; - ze_physical_mem_handle_t* phPhysicalMemory; - size_t* poffset; - ze_memory_access_attribute_t* paccess; -} ze_virtual_mem_map_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemMap -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemMapCb_t)( - ze_virtual_mem_map_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemUnmap -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_unmap_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - size_t* psize; -} ze_virtual_mem_unmap_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemUnmap -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemUnmapCb_t)( - ze_virtual_mem_unmap_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemSetAccessAttribute -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_set_access_attribute_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - size_t* psize; - ze_memory_access_attribute_t* paccess; -} ze_virtual_mem_set_access_attribute_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemSetAccessAttribute -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemSetAccessAttributeCb_t)( - ze_virtual_mem_set_access_attribute_params_t* params, - ze_result_t result, - 
void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function parameters for zeVirtualMemGetAccessAttribute -/// @details Each entry is a pointer to the parameter passed to the function; -/// allowing the callback the ability to modify the parameter's value -typedef struct _ze_virtual_mem_get_access_attribute_params_t -{ - ze_context_handle_t* phContext; - const void** pptr; - size_t* psize; - ze_memory_access_attribute_t** paccess; - size_t** poutSize; -} ze_virtual_mem_get_access_attribute_params_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Callback function-pointer for zeVirtualMemGetAccessAttribute -/// @param[in] params Parameters passed to this instance -/// @param[in] result Return value -/// @param[in] pTracerUserData Per-Tracer user data -/// @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data -typedef void (ZE_APICALL *ze_pfnVirtualMemGetAccessAttributeCb_t)( - ze_virtual_mem_get_access_attribute_params_t* params, - ze_result_t result, - void* pTracerUserData, - void** ppTracerInstanceUserData - ); - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Table of VirtualMem callback functions pointers -typedef struct _ze_virtual_mem_callbacks_t -{ - ze_pfnVirtualMemReserveCb_t pfnReserveCb; - ze_pfnVirtualMemFreeCb_t pfnFreeCb; - ze_pfnVirtualMemQueryPageSizeCb_t pfnQueryPageSizeCb; - ze_pfnVirtualMemMapCb_t pfnMapCb; - ze_pfnVirtualMemUnmapCb_t pfnUnmapCb; - ze_pfnVirtualMemSetAccessAttributeCb_t pfnSetAccessAttributeCb; - ze_pfnVirtualMemGetAccessAttributeCb_t pfnGetAccessAttributeCb; -} ze_virtual_mem_callbacks_t; - -/////////////////////////////////////////////////////////////////////////////// -/// @brief Container for all callbacks -typedef struct _ze_callbacks_t -{ - ze_global_callbacks_t Global; - ze_driver_callbacks_t Driver; - ze_device_callbacks_t Device; - ze_context_callbacks_t Context; - ze_command_queue_callbacks_t CommandQueue; - ze_command_list_callbacks_t CommandList; - ze_fence_callbacks_t Fence; - ze_event_pool_callbacks_t EventPool; - ze_event_callbacks_t Event; - ze_image_callbacks_t Image; - ze_module_callbacks_t Module; - ze_module_build_log_callbacks_t ModuleBuildLog; - ze_kernel_callbacks_t Kernel; - ze_sampler_callbacks_t Sampler; - ze_physical_mem_callbacks_t PhysicalMem; - ze_mem_callbacks_t Mem; - ze_virtual_mem_callbacks_t VirtualMem; -} ze_callbacks_t; - -#if !defined(__GNUC__) -#pragma endregion -#endif - -#if defined(__cplusplus) -} // extern "C" -#endif - -#endif // _ZE_API_H \ No newline at end of file diff --git a/level_zero-sys/lib/ze_loader.def b/level_zero-sys/lib/ze_loader.def deleted file mode 100644 index d95ffad9..00000000 --- a/level_zero-sys/lib/ze_loader.def +++ /dev/null @@ -1,316 +0,0 @@ -EXPORTS -zeCommandListAppendBarrier -zeCommandListAppendEventReset -zeCommandListAppendImageCopy -zeCommandListAppendImageCopyFromMemory -zeCommandListAppendImageCopyRegion -zeCommandListAppendImageCopyToMemory -zeCommandListAppendLaunchCooperativeKernel -zeCommandListAppendLaunchKernel -zeCommandListAppendLaunchKernelIndirect -zeCommandListAppendLaunchMultipleKernelsIndirect -zeCommandListAppendMemAdvise -zeCommandListAppendMemoryCopy -zeCommandListAppendMemoryCopyFromContext -zeCommandListAppendMemoryCopyRegion -zeCommandListAppendMemoryFill -zeCommandListAppendMemoryPrefetch -zeCommandListAppendMemoryRangesBarrier 
-zeCommandListAppendQueryKernelTimestamps -zeCommandListAppendSignalEvent -zeCommandListAppendWaitOnEvents -zeCommandListAppendWriteGlobalTimestamp -zeCommandListClose -zeCommandListCreate -zeCommandListCreateImmediate -zeCommandListDestroy -zeCommandListReset -zeCommandQueueCreate -zeCommandQueueDestroy -zeCommandQueueExecuteCommandLists -zeCommandQueueSynchronize -zeContextCreate -zeContextCreateEx -zeContextDestroy -zeContextEvictImage -zeContextEvictMemory -zeContextGetStatus -zeContextMakeImageResident -zeContextMakeMemoryResident -zeContextSystemBarrier -zeDeviceCanAccessPeer -zeDeviceGet -zeDeviceGetCacheProperties -zeDeviceGetCommandQueueGroupProperties -zeDeviceGetComputeProperties -zeDeviceGetExternalMemoryProperties -zeDeviceGetGlobalTimestamps -zeDeviceGetImageProperties -zeDeviceGetMemoryAccessProperties -zeDeviceGetMemoryProperties -zeDeviceGetModuleProperties -zeDeviceGetP2PProperties -zeDeviceGetProperties -zeDeviceGetStatus -zeDeviceGetSubDevices -zeDriverGet -zeDriverGetApiVersion -zeDriverGetExtensionFunctionAddress -zeDriverGetExtensionProperties -zeDriverGetIpcProperties -zeDriverGetProperties -zeEventCreate -zeEventDestroy -zeEventHostReset -zeEventHostSignal -zeEventHostSynchronize -zeEventPoolCloseIpcHandle -zeEventPoolCreate -zeEventPoolDestroy -zeEventPoolGetIpcHandle -zeEventPoolOpenIpcHandle -zeEventQueryKernelTimestamp -zeEventQueryStatus -zeFenceCreate -zeFenceDestroy -zeFenceHostSynchronize -zeFenceQueryStatus -zeFenceReset -zeGetCommandListProcAddrTable -zeGetCommandQueueProcAddrTable -zeGetContextProcAddrTable -zeGetDeviceProcAddrTable -zeGetDriverProcAddrTable -zeGetEventPoolProcAddrTable -zeGetEventProcAddrTable -zeGetFenceProcAddrTable -zeGetGlobalProcAddrTable -zeGetImageProcAddrTable -zeGetKernelExpProcAddrTable -zeGetKernelProcAddrTable -zeGetMemProcAddrTable -zeGetModuleBuildLogProcAddrTable -zeGetModuleProcAddrTable -zeGetPhysicalMemProcAddrTable -zeGetSamplerProcAddrTable -zeGetVirtualMemProcAddrTable -zeImageCreate -zeImageDestroy -zeImageGetProperties -zeInit -zeKernelCreate -zeKernelDestroy -zeKernelGetIndirectAccess -zeKernelGetName -zeKernelGetProperties -zeKernelGetSourceAttributes -zeKernelSetArgumentValue -zeKernelSetCacheConfig -zeKernelSetGlobalOffsetExp -zeKernelSetGroupSize -zeKernelSetIndirectAccess -zeKernelSuggestGroupSize -zeKernelSuggestMaxCooperativeGroupCount -zeLoaderInit -zeMemAllocDevice -zeMemAllocHost -zeMemAllocShared -zeMemCloseIpcHandle -zeMemFree -zeMemGetAddressRange -zeMemGetAllocProperties -zeMemGetIpcHandle -zeMemOpenIpcHandle -zeModuleBuildLogDestroy -zeModuleBuildLogGetString -zeModuleCreate -zeModuleDestroy -zeModuleDynamicLink -zeModuleGetFunctionPointer -zeModuleGetGlobalPointer -zeModuleGetKernelNames -zeModuleGetNativeBinary -zeModuleGetProperties -zePhysicalMemCreate -zePhysicalMemDestroy -zeSamplerCreate -zeSamplerDestroy -zeVirtualMemFree -zeVirtualMemGetAccessAttribute -zeVirtualMemMap -zeVirtualMemQueryPageSize -zeVirtualMemReserve -zeVirtualMemSetAccessAttribute -zeVirtualMemUnmap -zelGetTracerApiProcAddrTable -zelTracerCreate -zelTracerDestroy -zelTracerSetEnabled -zelTracerSetEpilogues -zelTracerSetPrologues -zesDeviceEnumDiagnosticTestSuites -zesDeviceEnumEngineGroups -zesDeviceEnumFabricPorts -zesDeviceEnumFans -zesDeviceEnumFirmwares -zesDeviceEnumFrequencyDomains -zesDeviceEnumLeds -zesDeviceEnumMemoryModules -zesDeviceEnumPerformanceFactorDomains -zesDeviceEnumPowerDomains -zesDeviceEnumPsus -zesDeviceEnumRasErrorSets -zesDeviceEnumSchedulers -zesDeviceEnumStandbyDomains 
-zesDeviceEnumTemperatureSensors -zesDeviceEventRegister -zesDeviceGetProperties -zesDeviceGetState -zesDevicePciGetBars -zesDevicePciGetProperties -zesDevicePciGetState -zesDevicePciGetStats -zesDeviceProcessesGetState -zesDeviceReset -zesDiagnosticsGetProperties -zesDiagnosticsGetTests -zesDiagnosticsRunTests -zesDriverEventListen -zesDriverEventListenEx -zesEngineGetActivity -zesEngineGetProperties -zesFabricPortGetConfig -zesFabricPortGetLinkType -zesFabricPortGetProperties -zesFabricPortGetState -zesFabricPortGetThroughput -zesFabricPortSetConfig -zesFanGetConfig -zesFanGetProperties -zesFanGetState -zesFanSetDefaultMode -zesFanSetFixedSpeedMode -zesFanSetSpeedTableMode -zesFirmwareFlash -zesFirmwareGetProperties -zesFrequencyGetAvailableClocks -zesFrequencyGetProperties -zesFrequencyGetRange -zesFrequencyGetState -zesFrequencyGetThrottleTime -zesFrequencyOcGetCapabilities -zesFrequencyOcGetFrequencyTarget -zesFrequencyOcGetIccMax -zesFrequencyOcGetMode -zesFrequencyOcGetTjMax -zesFrequencyOcGetVoltageTarget -zesFrequencyOcSetFrequencyTarget -zesFrequencyOcSetIccMax -zesFrequencyOcSetMode -zesFrequencyOcSetTjMax -zesFrequencyOcSetVoltageTarget -zesFrequencySetRange -zesGetDeviceProcAddrTable -zesGetDiagnosticsProcAddrTable -zesGetDriverProcAddrTable -zesGetEngineProcAddrTable -zesGetFabricPortProcAddrTable -zesGetFanProcAddrTable -zesGetFirmwareProcAddrTable -zesGetFrequencyProcAddrTable -zesGetLedProcAddrTable -zesGetMemoryProcAddrTable -zesGetPerformanceFactorProcAddrTable -zesGetPowerProcAddrTable -zesGetPsuProcAddrTable -zesGetRasProcAddrTable -zesGetSchedulerProcAddrTable -zesGetStandbyProcAddrTable -zesGetTemperatureProcAddrTable -zesLedGetProperties -zesLedGetState -zesLedSetColor -zesLedSetState -zesMemoryGetBandwidth -zesMemoryGetProperties -zesMemoryGetState -zesPerformanceFactorGetConfig -zesPerformanceFactorGetProperties -zesPerformanceFactorSetConfig -zesPowerGetEnergyCounter -zesPowerGetEnergyThreshold -zesPowerGetLimits -zesPowerGetProperties -zesPowerSetEnergyThreshold -zesPowerSetLimits -zesPsuGetProperties -zesPsuGetState -zesRasGetConfig -zesRasGetProperties -zesRasGetState -zesRasSetConfig -zesSchedulerGetCurrentMode -zesSchedulerGetProperties -zesSchedulerGetTimeoutModeProperties -zesSchedulerGetTimesliceModeProperties -zesSchedulerSetComputeUnitDebugMode -zesSchedulerSetExclusiveMode -zesSchedulerSetTimeoutMode -zesSchedulerSetTimesliceMode -zesStandbyGetMode -zesStandbyGetProperties -zesStandbySetMode -zesTemperatureGetConfig -zesTemperatureGetProperties -zesTemperatureGetState -zesTemperatureSetConfig -zetCommandListAppendMetricMemoryBarrier -zetCommandListAppendMetricQueryBegin -zetCommandListAppendMetricQueryEnd -zetCommandListAppendMetricStreamerMarker -zetContextActivateMetricGroups -zetDebugAcknowledgeEvent -zetDebugAttach -zetDebugDetach -zetDebugGetRegisterSetProperties -zetDebugInterrupt -zetDebugReadEvent -zetDebugReadMemory -zetDebugReadRegisters -zetDebugResume -zetDebugWriteMemory -zetDebugWriteRegisters -zetDeviceGetDebugProperties -zetGetCommandListProcAddrTable -zetGetContextProcAddrTable -zetGetDebugProcAddrTable -zetGetDeviceProcAddrTable -zetGetKernelProcAddrTable -zetGetMetricGroupProcAddrTable -zetGetMetricProcAddrTable -zetGetMetricQueryPoolProcAddrTable -zetGetMetricQueryProcAddrTable -zetGetMetricStreamerProcAddrTable -zetGetModuleProcAddrTable -zetGetTracerExpProcAddrTable -zetKernelGetProfileInfo -zetMetricGet -zetMetricGetProperties -zetMetricGroupCalculateMetricValues -zetMetricGroupGet -zetMetricGroupGetProperties 
-zetMetricQueryCreate -zetMetricQueryDestroy -zetMetricQueryGetData -zetMetricQueryPoolCreate -zetMetricQueryPoolDestroy -zetMetricQueryReset -zetMetricStreamerClose -zetMetricStreamerOpen -zetMetricStreamerReadData -zetModuleGetDebugInfo -zetTracerExpCreate -zetTracerExpDestroy -zetTracerExpSetEnabled -zetTracerExpSetEpilogues -zetTracerExpSetPrologues \ No newline at end of file diff --git a/level_zero-sys/lib/ze_loader.lib b/level_zero-sys/lib/ze_loader.lib deleted file mode 100644 index 525496a5..00000000 Binary files a/level_zero-sys/lib/ze_loader.lib and /dev/null differ diff --git a/level_zero-sys/src/lib.rs b/level_zero-sys/src/lib.rs deleted file mode 100644 index f7a7feb6..00000000 --- a/level_zero-sys/src/lib.rs +++ /dev/null @@ -1,3 +0,0 @@ -#![allow(warnings)] -pub mod ze_api; -pub use ze_api::*; \ No newline at end of file diff --git a/level_zero-sys/src/ze_api.rs b/level_zero-sys/src/ze_api.rs deleted file mode 100644 index 3b9974ad..00000000 --- a/level_zero-sys/src/ze_api.rs +++ /dev/null @@ -1,24408 +0,0 @@ -/* automatically generated by rust-bindgen 0.58.1 */ - -#[doc = ""] -#[doc = " @brief compiler-independent type"] -pub type ze_bool_t = u8; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of a driver instance"] -pub type ze_driver_handle_t = *mut _ze_driver_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's device object"] -pub type ze_device_handle_t = *mut _ze_device_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's context object"] -pub type ze_context_handle_t = *mut _ze_context_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's command queue object"] -pub type ze_command_queue_handle_t = *mut _ze_command_queue_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's command list object"] -pub type ze_command_list_handle_t = *mut _ze_command_list_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's fence object"] -pub type ze_fence_handle_t = *mut _ze_fence_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's event pool object"] -pub type ze_event_pool_handle_t = *mut _ze_event_pool_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's event object"] -pub type ze_event_handle_t = *mut _ze_event_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's image object"] -pub type ze_image_handle_t = *mut _ze_image_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's module object"] -pub type ze_module_handle_t = *mut _ze_module_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_build_log_handle_t { 
- _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of module's build log object"] -pub type ze_module_build_log_handle_t = *mut _ze_module_build_log_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's kernel object"] -pub type ze_kernel_handle_t = *mut _ze_kernel_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_sampler_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of driver's sampler object"] -pub type ze_sampler_handle_t = *mut _ze_sampler_handle_t; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_physical_mem_handle_t { - _unused: [u8; 0], -} -#[doc = ""] -#[doc = " @brief Handle of physical memory object"] -pub type ze_physical_mem_handle_t = *mut _ze_physical_mem_handle_t; -#[doc = ""] -#[doc = " @brief IPC handle to a memory allocation"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct _ze_ipc_mem_handle_t { - #[doc = "< [out] Opaque data representing an IPC handle"] - pub data: [::std::os::raw::c_char; 64usize], -} -#[test] -fn bindgen_test_layout__ze_ipc_mem_handle_t() { - assert_eq!( - ::std::mem::size_of::<_ze_ipc_mem_handle_t>(), - 64usize, - concat!("Size of: ", stringify!(_ze_ipc_mem_handle_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_ipc_mem_handle_t>(), - 1usize, - concat!("Alignment of ", stringify!(_ze_ipc_mem_handle_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_ipc_mem_handle_t>())).data as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_ipc_mem_handle_t), - "::", - stringify!(data) - ) - ); -} -#[doc = ""] -#[doc = " @brief IPC handle to a memory allocation"] -pub type ze_ipc_mem_handle_t = _ze_ipc_mem_handle_t; -#[doc = ""] -#[doc = " @brief IPC handle to a event pool allocation"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct _ze_ipc_event_pool_handle_t { - #[doc = "< [out] Opaque data representing an IPC handle"] - pub data: [::std::os::raw::c_char; 64usize], -} -#[test] -fn bindgen_test_layout__ze_ipc_event_pool_handle_t() { - assert_eq!( - ::std::mem::size_of::<_ze_ipc_event_pool_handle_t>(), - 64usize, - concat!("Size of: ", stringify!(_ze_ipc_event_pool_handle_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_ipc_event_pool_handle_t>(), - 1usize, - concat!("Alignment of ", stringify!(_ze_ipc_event_pool_handle_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_ipc_event_pool_handle_t>())).data as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_ipc_event_pool_handle_t), - "::", - stringify!(data) - ) - ); -} -#[doc = ""] -#[doc = " @brief IPC handle to a event pool allocation"] -pub type ze_ipc_event_pool_handle_t = _ze_ipc_event_pool_handle_t; -impl _ze_result_t { - #[doc = "< [Core] success"] - pub const ZE_RESULT_SUCCESS: _ze_result_t = _ze_result_t(0); -} -impl _ze_result_t { - #[doc = "< [Core] synchronization primitive not signaled"] - pub const ZE_RESULT_NOT_READY: _ze_result_t = _ze_result_t(1); -} -impl _ze_result_t { - #[doc = "< [Core] device hung, reset, was removed, or driver update occurred"] - pub const ZE_RESULT_ERROR_DEVICE_LOST: _ze_result_t = _ze_result_t(1879048193); -} -impl _ze_result_t { - #[doc = "< [Core] insufficient host memory to satisfy call"] - pub const ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY: _ze_result_t = _ze_result_t(1879048194); -} -impl _ze_result_t { - #[doc = "< [Core] insufficient device memory to satisfy call"] - pub const 
ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY: _ze_result_t = _ze_result_t(1879048195); -} -impl _ze_result_t { - #[doc = "< [Core] error occurred when building module, see build log for details"] - pub const ZE_RESULT_ERROR_MODULE_BUILD_FAILURE: _ze_result_t = _ze_result_t(1879048196); -} -impl _ze_result_t { - #[doc = "< [Core] error occurred when linking modules, see build log for details"] - pub const ZE_RESULT_ERROR_MODULE_LINK_FAILURE: _ze_result_t = _ze_result_t(1879048197); -} -impl _ze_result_t { - #[doc = "< [Sysman] access denied due to permission level"] - pub const ZE_RESULT_ERROR_INSUFFICIENT_PERMISSIONS: _ze_result_t = _ze_result_t(1879113728); -} -impl _ze_result_t { - #[doc = "< [Sysman] resource already in use and simultaneous access not allowed"] - #[doc = "< or resource was removed"] - pub const ZE_RESULT_ERROR_NOT_AVAILABLE: _ze_result_t = _ze_result_t(1879113729); -} -impl _ze_result_t { - #[doc = "< [Tools] external required dependency is unavailable or missing"] - pub const ZE_RESULT_ERROR_DEPENDENCY_UNAVAILABLE: _ze_result_t = _ze_result_t(1879179264); -} -impl _ze_result_t { - #[doc = "< [Validation] driver is not initialized"] - pub const ZE_RESULT_ERROR_UNINITIALIZED: _ze_result_t = _ze_result_t(2013265921); -} -impl _ze_result_t { - #[doc = "< [Validation] generic error code for unsupported versions"] - pub const ZE_RESULT_ERROR_UNSUPPORTED_VERSION: _ze_result_t = _ze_result_t(2013265922); -} -impl _ze_result_t { - #[doc = "< [Validation] generic error code for unsupported features"] - pub const ZE_RESULT_ERROR_UNSUPPORTED_FEATURE: _ze_result_t = _ze_result_t(2013265923); -} -impl _ze_result_t { - #[doc = "< [Validation] generic error code for invalid arguments"] - pub const ZE_RESULT_ERROR_INVALID_ARGUMENT: _ze_result_t = _ze_result_t(2013265924); -} -impl _ze_result_t { - #[doc = "< [Validation] handle argument is not valid"] - pub const ZE_RESULT_ERROR_INVALID_NULL_HANDLE: _ze_result_t = _ze_result_t(2013265925); -} -impl _ze_result_t { - #[doc = "< [Validation] object pointed to by handle still in-use by device"] - pub const ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE: _ze_result_t = _ze_result_t(2013265926); -} -impl _ze_result_t { - #[doc = "< [Validation] pointer argument may not be nullptr"] - pub const ZE_RESULT_ERROR_INVALID_NULL_POINTER: _ze_result_t = _ze_result_t(2013265927); -} -impl _ze_result_t { - #[doc = "< [Validation] size argument is invalid (e.g., must not be zero)"] - pub const ZE_RESULT_ERROR_INVALID_SIZE: _ze_result_t = _ze_result_t(2013265928); -} -impl _ze_result_t { - #[doc = "< [Validation] size argument is not supported by the device (e.g., too"] - #[doc = "< large)"] - pub const ZE_RESULT_ERROR_UNSUPPORTED_SIZE: _ze_result_t = _ze_result_t(2013265929); -} -impl _ze_result_t { - #[doc = "< [Validation] alignment argument is not supported by the device (e.g.,"] - #[doc = "< too small)"] - pub const ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT: _ze_result_t = _ze_result_t(2013265930); -} -impl _ze_result_t { - #[doc = "< [Validation] synchronization object in invalid state"] - pub const ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT: _ze_result_t = - _ze_result_t(2013265931); -} -impl _ze_result_t { - #[doc = "< [Validation] enumerator argument is not valid"] - pub const ZE_RESULT_ERROR_INVALID_ENUMERATION: _ze_result_t = _ze_result_t(2013265932); -} -impl _ze_result_t { - #[doc = "< [Validation] enumerator argument is not supported by the device"] - pub const ZE_RESULT_ERROR_UNSUPPORTED_ENUMERATION: _ze_result_t = _ze_result_t(2013265933); -} -impl 
_ze_result_t { - #[doc = "< [Validation] image format is not supported by the device"] - pub const ZE_RESULT_ERROR_UNSUPPORTED_IMAGE_FORMAT: _ze_result_t = _ze_result_t(2013265934); -} -impl _ze_result_t { - #[doc = "< [Validation] native binary is not supported by the device"] - pub const ZE_RESULT_ERROR_INVALID_NATIVE_BINARY: _ze_result_t = _ze_result_t(2013265935); -} -impl _ze_result_t { - #[doc = "< [Validation] global variable is not found in the module"] - pub const ZE_RESULT_ERROR_INVALID_GLOBAL_NAME: _ze_result_t = _ze_result_t(2013265936); -} -impl _ze_result_t { - #[doc = "< [Validation] kernel name is not found in the module"] - pub const ZE_RESULT_ERROR_INVALID_KERNEL_NAME: _ze_result_t = _ze_result_t(2013265937); -} -impl _ze_result_t { - #[doc = "< [Validation] function name is not found in the module"] - pub const ZE_RESULT_ERROR_INVALID_FUNCTION_NAME: _ze_result_t = _ze_result_t(2013265938); -} -impl _ze_result_t { - #[doc = "< [Validation] group size dimension is not valid for the kernel or"] - #[doc = "< device"] - pub const ZE_RESULT_ERROR_INVALID_GROUP_SIZE_DIMENSION: _ze_result_t = _ze_result_t(2013265939); -} -impl _ze_result_t { - #[doc = "< [Validation] global width dimension is not valid for the kernel or"] - #[doc = "< device"] - pub const ZE_RESULT_ERROR_INVALID_GLOBAL_WIDTH_DIMENSION: _ze_result_t = - _ze_result_t(2013265940); -} -impl _ze_result_t { - #[doc = "< [Validation] kernel argument index is not valid for kernel"] - pub const ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_INDEX: _ze_result_t = - _ze_result_t(2013265941); -} -impl _ze_result_t { - #[doc = "< [Validation] kernel argument size does not match kernel"] - pub const ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_SIZE: _ze_result_t = _ze_result_t(2013265942); -} -impl _ze_result_t { - #[doc = "< [Validation] value of kernel attribute is not valid for the kernel or"] - #[doc = "< device"] - pub const ZE_RESULT_ERROR_INVALID_KERNEL_ATTRIBUTE_VALUE: _ze_result_t = - _ze_result_t(2013265943); -} -impl _ze_result_t { - #[doc = "< [Validation] module with imports needs to be linked before kernels can"] - #[doc = "< be created from it."] - pub const ZE_RESULT_ERROR_INVALID_MODULE_UNLINKED: _ze_result_t = _ze_result_t(2013265944); -} -impl _ze_result_t { - #[doc = "< [Validation] command list type does not match command queue type"] - pub const ZE_RESULT_ERROR_INVALID_COMMAND_LIST_TYPE: _ze_result_t = _ze_result_t(2013265945); -} -impl _ze_result_t { - #[doc = "< [Validation] copy operations do not support overlapping regions of"] - #[doc = "< memory"] - pub const ZE_RESULT_ERROR_OVERLAPPING_REGIONS: _ze_result_t = _ze_result_t(2013265946); -} -impl _ze_result_t { - #[doc = "< [Core] unknown or internal error"] - pub const ZE_RESULT_ERROR_UNKNOWN: _ze_result_t = _ze_result_t(2147483646); -} -impl _ze_result_t { - pub const ZE_RESULT_FORCE_UINT32: _ze_result_t = _ze_result_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Defines Return/Error codes"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -#[must_use] -pub struct _ze_result_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Defines Return/Error codes"] -pub use self::_ze_result_t as ze_result_t; -impl _ze_structure_type_t { - #[doc = "< ::ze_driver_properties_t"] - pub const ZE_STRUCTURE_TYPE_DRIVER_PROPERTIES: _ze_structure_type_t = _ze_structure_type_t(1); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_driver_ipc_properties_t"] - pub const ZE_STRUCTURE_TYPE_DRIVER_IPC_PROPERTIES: _ze_structure_type_t = - 
_ze_structure_type_t(2); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_PROPERTIES: _ze_structure_type_t = _ze_structure_type_t(3); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_compute_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_COMPUTE_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(4); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_module_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_MODULE_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(5); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_command_queue_group_properties_t"] - pub const ZE_STRUCTURE_TYPE_COMMAND_QUEUE_GROUP_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(6); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_memory_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_MEMORY_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(7); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_memory_access_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_MEMORY_ACCESS_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(8); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_cache_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_CACHE_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(9); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_image_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_IMAGE_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(10); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_p2p_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_P2P_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(11); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_external_memory_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_EXTERNAL_MEMORY_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(12); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_context_desc_t"] - pub const ZE_STRUCTURE_TYPE_CONTEXT_DESC: _ze_structure_type_t = _ze_structure_type_t(13); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_command_queue_desc_t"] - pub const ZE_STRUCTURE_TYPE_COMMAND_QUEUE_DESC: _ze_structure_type_t = _ze_structure_type_t(14); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_command_list_desc_t"] - pub const ZE_STRUCTURE_TYPE_COMMAND_LIST_DESC: _ze_structure_type_t = _ze_structure_type_t(15); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_event_pool_desc_t"] - pub const ZE_STRUCTURE_TYPE_EVENT_POOL_DESC: _ze_structure_type_t = _ze_structure_type_t(16); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_event_desc_t"] - pub const ZE_STRUCTURE_TYPE_EVENT_DESC: _ze_structure_type_t = _ze_structure_type_t(17); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_fence_desc_t"] - pub const ZE_STRUCTURE_TYPE_FENCE_DESC: _ze_structure_type_t = _ze_structure_type_t(18); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_image_desc_t"] - pub const ZE_STRUCTURE_TYPE_IMAGE_DESC: _ze_structure_type_t = _ze_structure_type_t(19); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_image_properties_t"] - pub const ZE_STRUCTURE_TYPE_IMAGE_PROPERTIES: _ze_structure_type_t = _ze_structure_type_t(20); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_mem_alloc_desc_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC: _ze_structure_type_t = - _ze_structure_type_t(21); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_host_mem_alloc_desc_t"] - pub const ZE_STRUCTURE_TYPE_HOST_MEM_ALLOC_DESC: _ze_structure_type_t = - 
_ze_structure_type_t(22); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_memory_allocation_properties_t"] - pub const ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(23); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_external_memory_export_desc_t"] - pub const ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_EXPORT_DESC: _ze_structure_type_t = - _ze_structure_type_t(24); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_external_memory_import_fd_t"] - pub const ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMPORT_FD: _ze_structure_type_t = - _ze_structure_type_t(25); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_external_memory_export_fd_t"] - pub const ZE_STRUCTURE_TYPE_EXTERNAL_MEMORY_EXPORT_FD: _ze_structure_type_t = - _ze_structure_type_t(26); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_module_desc_t"] - pub const ZE_STRUCTURE_TYPE_MODULE_DESC: _ze_structure_type_t = _ze_structure_type_t(27); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_module_properties_t"] - pub const ZE_STRUCTURE_TYPE_MODULE_PROPERTIES: _ze_structure_type_t = _ze_structure_type_t(28); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_kernel_desc_t"] - pub const ZE_STRUCTURE_TYPE_KERNEL_DESC: _ze_structure_type_t = _ze_structure_type_t(29); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_kernel_properties_t"] - pub const ZE_STRUCTURE_TYPE_KERNEL_PROPERTIES: _ze_structure_type_t = _ze_structure_type_t(30); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_sampler_desc_t"] - pub const ZE_STRUCTURE_TYPE_SAMPLER_DESC: _ze_structure_type_t = _ze_structure_type_t(31); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_physical_mem_desc_t"] - pub const ZE_STRUCTURE_TYPE_PHYSICAL_MEM_DESC: _ze_structure_type_t = _ze_structure_type_t(32); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_device_raytracing_ext_properties_t"] - pub const ZE_STRUCTURE_TYPE_DEVICE_RAYTRACING_EXT_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(65537); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_raytracing_mem_alloc_ext_desc_t"] - pub const ZE_STRUCTURE_TYPE_RAYTRACING_MEM_ALLOC_EXT_DESC: _ze_structure_type_t = - _ze_structure_type_t(65538); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_float_atomic_ext_properties_t"] - pub const ZE_STRUCTURE_TYPE_FLOAT_ATOMIC_EXT_PROPERTIES: _ze_structure_type_t = - _ze_structure_type_t(65539); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_relaxed_allocation_limits_exp_desc_t"] - pub const ZE_STRUCTURE_TYPE_RELAXED_ALLOCATION_LIMITS_EXP_DESC: _ze_structure_type_t = - _ze_structure_type_t(131073); -} -impl _ze_structure_type_t { - #[doc = "< ::ze_module_program_exp_desc_t"] - pub const ZE_STRUCTURE_TYPE_MODULE_PROGRAM_EXP_DESC: _ze_structure_type_t = - _ze_structure_type_t(131074); -} -impl _ze_structure_type_t { - pub const ZE_STRUCTURE_TYPE_FORCE_UINT32: _ze_structure_type_t = - _ze_structure_type_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Defines structure types"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_structure_type_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Defines structure types"] -pub use self::_ze_structure_type_t as ze_structure_type_t; -impl _ze_external_memory_type_flags_t { - #[doc = "< an opaque POSIX file descriptor handle"] - pub const ZE_EXTERNAL_MEMORY_TYPE_FLAG_OPAQUE_FD: _ze_external_memory_type_flags_t = - _ze_external_memory_type_flags_t(1); -} -impl _ze_external_memory_type_flags_t { - #[doc = "< a file descriptor handle for a Linux dma_buf"] - pub 
const ZE_EXTERNAL_MEMORY_TYPE_FLAG_DMA_BUF: _ze_external_memory_type_flags_t = - _ze_external_memory_type_flags_t(2); -} -impl _ze_external_memory_type_flags_t { - pub const ZE_EXTERNAL_MEMORY_TYPE_FLAG_FORCE_UINT32: _ze_external_memory_type_flags_t = - _ze_external_memory_type_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_external_memory_type_flags_t> for _ze_external_memory_type_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_external_memory_type_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_external_memory_type_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_external_memory_type_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_external_memory_type_flags_t> for _ze_external_memory_type_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_external_memory_type_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_external_memory_type_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_external_memory_type_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief External memory type flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_external_memory_type_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief External memory type flags"] -pub use self::_ze_external_memory_type_flags_t as ze_external_memory_type_flags_t; -#[doc = ""] -#[doc = " @brief Base for all properties types"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_base_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_base_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_base_properties_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_base_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_base_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_base_properties_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_base_properties_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_base_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_base_properties_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_base_properties_t), - "::", - stringify!(pNext) - ) - ); -} -impl Default for _ze_base_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -#[doc = ""] -#[doc = " @brief Base for all properties types"] -pub type ze_base_properties_t = _ze_base_properties_t; -#[doc = ""] -#[doc = " @brief Base for all descriptor types"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_base_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_base_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_base_desc_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_base_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_base_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_base_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_base_desc_t>())).stype as *const _ 
as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_base_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_base_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_base_desc_t), - "::", - stringify!(pNext) - ) - ); -} -#[doc = ""] -#[doc = " @brief Base for all descriptor types"] -pub type ze_base_desc_t = _ze_base_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_driver_uuid_t"] -pub type ze_driver_uuid_t = _ze_driver_uuid_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_driver_properties_t"] -pub type ze_driver_properties_t = _ze_driver_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_driver_ipc_properties_t"] -pub type ze_driver_ipc_properties_t = _ze_driver_ipc_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_driver_extension_properties_t"] -pub type ze_driver_extension_properties_t = _ze_driver_extension_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_uuid_t"] -pub type ze_device_uuid_t = _ze_device_uuid_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_properties_t"] -pub type ze_device_properties_t = _ze_device_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_thread_t"] -pub type ze_device_thread_t = _ze_device_thread_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_compute_properties_t"] -pub type ze_device_compute_properties_t = _ze_device_compute_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_native_kernel_uuid_t"] -pub type ze_native_kernel_uuid_t = _ze_native_kernel_uuid_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_module_properties_t"] -pub type ze_device_module_properties_t = _ze_device_module_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_command_queue_group_properties_t"] -pub type ze_command_queue_group_properties_t = _ze_command_queue_group_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_memory_properties_t"] -pub type ze_device_memory_properties_t = _ze_device_memory_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_memory_access_properties_t"] -pub type ze_device_memory_access_properties_t = _ze_device_memory_access_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_cache_properties_t"] -pub type ze_device_cache_properties_t = _ze_device_cache_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_image_properties_t"] -pub type ze_device_image_properties_t = _ze_device_image_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_external_memory_properties_t"] -pub type ze_device_external_memory_properties_t = _ze_device_external_memory_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_p2p_properties_t"] -pub type ze_device_p2p_properties_t = _ze_device_p2p_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_context_desc_t"] -pub type ze_context_desc_t = _ze_context_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_command_queue_desc_t"] -pub type ze_command_queue_desc_t = _ze_command_queue_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_command_list_desc_t"] -pub type ze_command_list_desc_t = _ze_command_list_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_copy_region_t"] -pub type ze_copy_region_t = _ze_copy_region_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_image_region_t"] -pub type ze_image_region_t = _ze_image_region_t; -#[doc = 
""] -#[doc = " @brief Forward-declare ze_event_pool_desc_t"] -pub type ze_event_pool_desc_t = _ze_event_pool_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_event_desc_t"] -pub type ze_event_desc_t = _ze_event_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_kernel_timestamp_data_t"] -pub type ze_kernel_timestamp_data_t = _ze_kernel_timestamp_data_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_kernel_timestamp_result_t"] -pub type ze_kernel_timestamp_result_t = _ze_kernel_timestamp_result_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_fence_desc_t"] -pub type ze_fence_desc_t = _ze_fence_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_image_format_t"] -pub type ze_image_format_t = _ze_image_format_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_image_desc_t"] -pub type ze_image_desc_t = _ze_image_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_image_properties_t"] -pub type ze_image_properties_t = _ze_image_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_mem_alloc_desc_t"] -pub type ze_device_mem_alloc_desc_t = _ze_device_mem_alloc_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_host_mem_alloc_desc_t"] -pub type ze_host_mem_alloc_desc_t = _ze_host_mem_alloc_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_memory_allocation_properties_t"] -pub type ze_memory_allocation_properties_t = _ze_memory_allocation_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_external_memory_export_desc_t"] -pub type ze_external_memory_export_desc_t = _ze_external_memory_export_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_external_memory_import_fd_t"] -pub type ze_external_memory_import_fd_t = _ze_external_memory_import_fd_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_external_memory_export_fd_t"] -pub type ze_external_memory_export_fd_t = _ze_external_memory_export_fd_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_module_constants_t"] -pub type ze_module_constants_t = _ze_module_constants_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_module_desc_t"] -pub type ze_module_desc_t = _ze_module_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_module_properties_t"] -pub type ze_module_properties_t = _ze_module_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_kernel_desc_t"] -pub type ze_kernel_desc_t = _ze_kernel_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_kernel_uuid_t"] -pub type ze_kernel_uuid_t = _ze_kernel_uuid_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_kernel_properties_t"] -pub type ze_kernel_properties_t = _ze_kernel_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_group_count_t"] -pub type ze_group_count_t = _ze_group_count_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_module_program_exp_desc_t"] -pub type ze_module_program_exp_desc_t = _ze_module_program_exp_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_device_raytracing_ext_properties_t"] -pub type ze_device_raytracing_ext_properties_t = _ze_device_raytracing_ext_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_raytracing_mem_alloc_ext_desc_t"] -pub type ze_raytracing_mem_alloc_ext_desc_t = _ze_raytracing_mem_alloc_ext_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_sampler_desc_t"] -pub type ze_sampler_desc_t = _ze_sampler_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_physical_mem_desc_t"] -pub type ze_physical_mem_desc_t = _ze_physical_mem_desc_t; -#[doc = ""] -#[doc = " @brief Forward-declare 
ze_float_atomic_ext_properties_t"] -pub type ze_float_atomic_ext_properties_t = _ze_float_atomic_ext_properties_t; -#[doc = ""] -#[doc = " @brief Forward-declare ze_relaxed_allocation_limits_exp_desc_t"] -pub type ze_relaxed_allocation_limits_exp_desc_t = _ze_relaxed_allocation_limits_exp_desc_t; -impl _ze_init_flags_t { - #[doc = "< only initialize GPU drivers"] - pub const ZE_INIT_FLAG_GPU_ONLY: _ze_init_flags_t = _ze_init_flags_t(1); -} -impl _ze_init_flags_t { - pub const ZE_INIT_FLAG_FORCE_UINT32: _ze_init_flags_t = _ze_init_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_init_flags_t> for _ze_init_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_init_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_init_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_init_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_init_flags_t> for _ze_init_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_init_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_init_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_init_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported initialization flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_init_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported initialization flags"] -pub use self::_ze_init_flags_t as ze_init_flags_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Initialize the 'oneAPI' driver(s)"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must call this function before calling any other"] - #[doc = " function."] - #[doc = " - If this function is not called then all other functions will return"] - #[doc = " ::ZE_RESULT_ERROR_UNINITIALIZED."] - #[doc = " - Only one instance of each driver will be initialized per process."] - #[doc = " - The application may call this function multiple times with different"] - #[doc = " flags or environment variables enabled."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe for scenarios"] - #[doc = " where multiple libraries may initialize the driver(s) simultaneously."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < flags`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - pub fn zeInit(flags: ze_init_flags_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves driver instances"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - A driver represents a collection of physical devices."] - #[doc = " - Multiple calls to this function will return identical driver handles,"] - #[doc = " in the same order."] - #[doc = " - The application may pass nullptr for pDrivers when only querying the"] - #[doc = " number of drivers."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clGetPlatformIDs"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - 
::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDriverGet(pCount: *mut u32, phDrivers: *mut ze_driver_handle_t) -> ze_result_t; -} -impl _ze_api_version_t { - #[doc = "< version 1.0"] - pub const ZE_API_VERSION_1_0: _ze_api_version_t = _ze_api_version_t(65536); -} -impl _ze_api_version_t { - #[doc = "< version 1.1"] - pub const ZE_API_VERSION_1_1: _ze_api_version_t = _ze_api_version_t(65537); -} -impl _ze_api_version_t { - #[doc = "< latest known version"] - pub const ZE_API_VERSION_CURRENT: _ze_api_version_t = _ze_api_version_t(65537); -} -impl _ze_api_version_t { - pub const ZE_API_VERSION_FORCE_UINT32: _ze_api_version_t = _ze_api_version_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported API versions"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - API versions contain major and minor attributes, use"] -#[doc = " ::ZE_MAJOR_VERSION and ::ZE_MINOR_VERSION"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_api_version_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported API versions"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - API versions contain major and minor attributes, use"] -#[doc = " ::ZE_MAJOR_VERSION and ::ZE_MINOR_VERSION"] -pub use self::_ze_api_version_t as ze_api_version_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Returns the API version supported by the specified driver"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == version`"] - pub fn zeDriverGetApiVersion( - hDriver: ze_driver_handle_t, - version: *mut ze_api_version_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Driver universal unique id (UUID)"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_driver_uuid_t { - #[doc = "< [out] opaque data representing a driver UUID"] - pub id: [u8; 16usize], -} -#[test] -fn bindgen_test_layout__ze_driver_uuid_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_uuid_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_driver_uuid_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_uuid_t>(), - 1usize, - concat!("Alignment of ", stringify!(_ze_driver_uuid_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_driver_uuid_t>())).id as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_uuid_t), - "::", - stringify!(id) - ) - ); -} -#[doc = ""] -#[doc = " @brief Driver properties queried using ::zeDriverGetProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] universal unique identifier."] - pub uuid: ze_driver_uuid_t, - #[doc = "< [out] driver version"] - #[doc = "< The driver version is a non-zero, monotonically increasing value where"] - #[doc = "< higher values always indicate a more recent version."] - pub 
driverVersion: u32, -} -#[test] -fn bindgen_test_layout__ze_driver_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_properties_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_driver_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_driver_properties_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_driver_properties_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_driver_properties_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_driver_properties_t>())).uuid as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_properties_t), - "::", - stringify!(uuid) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_properties_t>())).driverVersion as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_properties_t), - "::", - stringify!(driverVersion) - ) - ); -} -impl Default for _ze_driver_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves properties of the driver."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clGetPlatformInfo**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pDriverProperties`"] - pub fn zeDriverGetProperties( - hDriver: ze_driver_handle_t, - pDriverProperties: *mut ze_driver_properties_t, - ) -> ze_result_t; -} -impl _ze_ipc_property_flags_t { - #[doc = "< Supports passing memory allocations between processes. See"] - #[doc = "< ::zeMemGetIpcHandle."] - pub const ZE_IPC_PROPERTY_FLAG_MEMORY: _ze_ipc_property_flags_t = _ze_ipc_property_flags_t(1); -} -impl _ze_ipc_property_flags_t { - #[doc = "< Supports passing event pools between processes. 
See"] - #[doc = "< ::zeEventPoolGetIpcHandle."] - pub const ZE_IPC_PROPERTY_FLAG_EVENT_POOL: _ze_ipc_property_flags_t = - _ze_ipc_property_flags_t(2); -} -impl _ze_ipc_property_flags_t { - pub const ZE_IPC_PROPERTY_FLAG_FORCE_UINT32: _ze_ipc_property_flags_t = - _ze_ipc_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_ipc_property_flags_t> for _ze_ipc_property_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_ipc_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_ipc_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_ipc_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_ipc_property_flags_t> for _ze_ipc_property_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_ipc_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_ipc_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_ipc_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported IPC property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_ipc_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported IPC property flags"] -pub use self::_ze_ipc_property_flags_t as ze_ipc_property_flags_t; -#[doc = ""] -#[doc = " @brief IPC properties queried using ::zeDriverGetIpcProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_ipc_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 (none) or a valid combination of ::ze_ipc_property_flags_t"] - pub flags: ze_ipc_property_flags_t, -} -#[test] -fn bindgen_test_layout__ze_driver_ipc_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_ipc_properties_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_driver_ipc_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_ipc_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_driver_ipc_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_ipc_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_ipc_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_ipc_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_ipc_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_ipc_properties_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_ipc_properties_t), - "::", - stringify!(flags) - ) - ); -} -impl Default for _ze_driver_ipc_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves IPC attributes of the driver"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - 
::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pIpcProperties`"] - pub fn zeDriverGetIpcProperties( - hDriver: ze_driver_handle_t, - pIpcProperties: *mut ze_driver_ipc_properties_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Extension properties queried using ::zeDriverGetExtensionProperties"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct _ze_driver_extension_properties_t { - #[doc = "< [out] extension name"] - pub name: [::std::os::raw::c_char; 256usize], - #[doc = "< [out] extension version using ::ZE_MAKE_VERSION"] - pub version: u32, -} -#[test] -fn bindgen_test_layout__ze_driver_extension_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_extension_properties_t>(), - 260usize, - concat!("Size of: ", stringify!(_ze_driver_extension_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_extension_properties_t>(), - 4usize, - concat!( - "Alignment of ", - stringify!(_ze_driver_extension_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_extension_properties_t>())).name as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_extension_properties_t), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_extension_properties_t>())).version as *const _ - as usize - }, - 256usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_extension_properties_t), - "::", - stringify!(version) - ) - ); -} -impl Default for _ze_driver_extension_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves extension properties"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkEnumerateInstanceExtensionProperties**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDriverGetExtensionProperties( - hDriver: ze_driver_handle_t, - pCount: *mut u32, - pExtensionProperties: *mut ze_driver_extension_properties_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves function pointer for vendor-specific or experimental"] - #[doc = " extensions"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == name`"] - #[doc = " + `nullptr == ppFunctionAddress`"] - pub fn zeDriverGetExtensionFunctionAddress( - hDriver: ze_driver_handle_t, - name: *const ::std::os::raw::c_char, - ppFunctionAddress: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} 
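// The removed doc comments above spell out the Level Zero bootstrap sequence:
// zeInit() must be called before any other entry point, and zeDriverGet() uses
// the usual two-call pattern (query the count with a null array, then fill a
// buffer of that size). A minimal sketch of that pattern follows, assuming the
// deleted level_zero-sys items (zeInit, zeDriverGet, ze_init_flags_t,
// ze_result_t, ze_driver_handle_t) are in scope, that ze_driver_handle_t is the
// usual bindgen raw-pointer typedef, and that ze_result_t::ZE_RESULT_SUCCESS is
// the success constant, as elsewhere in these generated bindings. Illustrative
// only; it is not part of the change itself.
unsafe fn enumerate_drivers() -> Result<Vec<ze_driver_handle_t>, ze_result_t> {
    // Initialize the GPU driver(s); required before any other Level Zero call.
    let res = zeInit(ze_init_flags_t::ZE_INIT_FLAG_GPU_ONLY);
    if res != ze_result_t::ZE_RESULT_SUCCESS {
        return Err(res);
    }
    // First call: pass a null handle array so only the driver count is written.
    let mut count = 0u32;
    let res = zeDriverGet(&mut count, std::ptr::null_mut());
    if res != ze_result_t::ZE_RESULT_SUCCESS {
        return Err(res);
    }
    // Second call: a buffer sized to the reported count receives the handles.
    let mut drivers: Vec<ze_driver_handle_t> = vec![std::ptr::null_mut(); count as usize];
    let res = zeDriverGet(&mut count, drivers.as_mut_ptr());
    if res != ze_result_t::ZE_RESULT_SUCCESS {
        return Err(res);
    }
    Ok(drivers)
}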
-extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves devices within a driver"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Multiple calls to this function will return identical device handles,"] - #[doc = " in the same order."] - #[doc = " - The number and order of handles returned from this function is"] - #[doc = " affected by the ::ZE_AFFINITY_MASK and ::ZE_ENABLE_PCI_ID_DEVICE_ORDER"] - #[doc = " environment variables."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDeviceGet( - hDriver: ze_driver_handle_t, - pCount: *mut u32, - phDevices: *mut ze_device_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves a sub-device from a device"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Multiple calls to this function will return identical device handles,"] - #[doc = " in the same order."] - #[doc = " - The number of handles returned from this function is affected by the"] - #[doc = " ::ZE_AFFINITY_MASK environment variable."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clCreateSubDevices"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDeviceGetSubDevices( - hDevice: ze_device_handle_t, - pCount: *mut u32, - phSubdevices: *mut ze_device_handle_t, - ) -> ze_result_t; -} -impl _ze_device_type_t { - #[doc = "< Graphics Processing Unit"] - pub const ZE_DEVICE_TYPE_GPU: _ze_device_type_t = _ze_device_type_t(1); -} -impl _ze_device_type_t { - #[doc = "< Central Processing Unit"] - pub const ZE_DEVICE_TYPE_CPU: _ze_device_type_t = _ze_device_type_t(2); -} -impl _ze_device_type_t { - #[doc = "< Field Programmable Gate Array"] - pub const ZE_DEVICE_TYPE_FPGA: _ze_device_type_t = _ze_device_type_t(3); -} -impl _ze_device_type_t { - #[doc = "< Memory Copy Accelerator"] - pub const ZE_DEVICE_TYPE_MCA: _ze_device_type_t = _ze_device_type_t(4); -} -impl _ze_device_type_t { - pub const ZE_DEVICE_TYPE_FORCE_UINT32: _ze_device_type_t = _ze_device_type_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported device types"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_type_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported device types"] -pub use self::_ze_device_type_t as ze_device_type_t; -#[doc = ""] -#[doc = " @brief Device universal unique id (UUID)"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_device_uuid_t { - #[doc = "< [out] opaque data representing a device UUID"] - pub id: [u8; 16usize], -} -#[test] -fn bindgen_test_layout__ze_device_uuid_t() { - assert_eq!( - 
::std::mem::size_of::<_ze_device_uuid_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_device_uuid_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_uuid_t>(), - 1usize, - concat!("Alignment of ", stringify!(_ze_device_uuid_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_uuid_t>())).id as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_uuid_t), - "::", - stringify!(id) - ) - ); -} -impl _ze_device_property_flags_t { - #[doc = "< Device is integrated with the Host."] - pub const ZE_DEVICE_PROPERTY_FLAG_INTEGRATED: _ze_device_property_flags_t = - _ze_device_property_flags_t(1); -} -impl _ze_device_property_flags_t { - #[doc = "< Device handle used for query represents a sub-device."] - pub const ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE: _ze_device_property_flags_t = - _ze_device_property_flags_t(2); -} -impl _ze_device_property_flags_t { - #[doc = "< Device supports error correction memory access."] - pub const ZE_DEVICE_PROPERTY_FLAG_ECC: _ze_device_property_flags_t = - _ze_device_property_flags_t(4); -} -impl _ze_device_property_flags_t { - #[doc = "< Device supports on-demand page-faulting."] - pub const ZE_DEVICE_PROPERTY_FLAG_ONDEMANDPAGING: _ze_device_property_flags_t = - _ze_device_property_flags_t(8); -} -impl _ze_device_property_flags_t { - pub const ZE_DEVICE_PROPERTY_FLAG_FORCE_UINT32: _ze_device_property_flags_t = - _ze_device_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_property_flags_t> for _ze_device_property_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_property_flags_t> for _ze_device_property_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported device property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported device property flags"] -pub use self::_ze_device_property_flags_t as ze_device_property_flags_t; -#[doc = ""] -#[doc = " @brief Device properties queried using ::zeDeviceGetProperties"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct _ze_device_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] generic device type"] - pub type_: ze_device_type_t, - #[doc = "< [out] vendor id from PCI configuration"] - pub vendorId: u32, - #[doc = "< [out] device id from PCI configuration"] - pub deviceId: u32, - #[doc = "< [out] 0 (none) or a valid combination of ::ze_device_property_flags_t"] - pub flags: ze_device_property_flags_t, - #[doc = "< [out] sub-device id. 
Only valid if ::ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE"] - #[doc = "< is set."] - pub subdeviceId: u32, - #[doc = "< [out] Clock rate for device core."] - pub coreClockRate: u32, - #[doc = "< [out] Maximum memory allocation size."] - pub maxMemAllocSize: u64, - #[doc = "< [out] Maximum number of logical hardware contexts."] - pub maxHardwareContexts: u32, - #[doc = "< [out] Maximum priority for command queues. Higher value is higher"] - #[doc = "< priority."] - pub maxCommandQueuePriority: u32, - #[doc = "< [out] Number of threads per EU."] - pub numThreadsPerEU: u32, - #[doc = "< [out] The physical EU simd width."] - pub physicalEUSimdWidth: u32, - #[doc = "< [out] Number of EUs per sub-slice."] - pub numEUsPerSubslice: u32, - #[doc = "< [out] Number of sub-slices per slice."] - pub numSubslicesPerSlice: u32, - #[doc = "< [out] Number of slices."] - pub numSlices: u32, - #[doc = "< [out] Returns the resolution of device timer in cycles per second used"] - #[doc = "< for profiling, timestamps, etc."] - pub timerResolution: u64, - #[doc = "< [out] Returns the number of valid bits in the timestamp value."] - pub timestampValidBits: u32, - #[doc = "< [out] Returns the number of valid bits in the kernel timestamp values"] - pub kernelTimestampValidBits: u32, - #[doc = "< [out] universal unique identifier. Note: Subdevices will have their"] - #[doc = "< own uuid."] - pub uuid: ze_device_uuid_t, - #[doc = "< [out] Device name"] - pub name: [::std::os::raw::c_char; 256usize], -} -#[test] -fn bindgen_test_layout__ze_device_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_properties_t>(), - 368usize, - concat!("Size of: ", stringify!(_ze_device_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_properties_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_properties_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_properties_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_properties_t>())).type_ as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).vendorId as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(vendorId) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).deviceId as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(deviceId) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_properties_t>())).flags as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).subdeviceId as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(subdeviceId) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_device_properties_t>())).coreClockRate as *const _ as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(coreClockRate) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).maxMemAllocSize as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(maxMemAllocSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).maxHardwareContexts as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(maxHardwareContexts) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).maxCommandQueuePriority as *const _ - as usize - }, - 52usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(maxCommandQueuePriority) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).numThreadsPerEU as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(numThreadsPerEU) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).physicalEUSimdWidth as *const _ - as usize - }, - 60usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(physicalEUSimdWidth) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).numEUsPerSubslice as *const _ - as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(numEUsPerSubslice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).numSubslicesPerSlice as *const _ - as usize - }, - 68usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(numSubslicesPerSlice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).numSlices as *const _ as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(numSlices) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).timerResolution as *const _ as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(timerResolution) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).timestampValidBits as *const _ - as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(timestampValidBits) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_properties_t>())).kernelTimestampValidBits as *const _ - as usize - }, - 92usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(kernelTimestampValidBits) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_properties_t>())).uuid as *const _ as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(uuid) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_properties_t>())).name as *const _ as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_properties_t), - "::", - stringify!(name) - ) - ); -} -impl Default for 
_ze_device_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -#[doc = ""] -#[doc = " @brief Device thread identifier."] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_device_thread_t { - #[doc = "< [in,out] the slice number."] - #[doc = "< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numSlices."] - pub slice: u32, - #[doc = "< [in,out] the sub-slice number within its slice."] - #[doc = "< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numSubslicesPerSlice."] - pub subslice: u32, - #[doc = "< [in,out] the EU number within its sub-slice."] - #[doc = "< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numEUsPerSubslice."] - pub eu: u32, - #[doc = "< [in,out] the thread number within its EU."] - #[doc = "< Must be UINT32_MAX (all) or less than ::ze_device_properties_t.numThreadsPerEU."] - pub thread: u32, -} -#[test] -fn bindgen_test_layout__ze_device_thread_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_thread_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_device_thread_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_thread_t>(), - 4usize, - concat!("Alignment of ", stringify!(_ze_device_thread_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_thread_t>())).slice as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_thread_t), - "::", - stringify!(slice) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_thread_t>())).subslice as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_thread_t), - "::", - stringify!(subslice) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_thread_t>())).eu as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_thread_t), - "::", - stringify!(eu) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_thread_t>())).thread as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_thread_t), - "::", - stringify!(thread) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves properties of the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clGetDeviceInfo"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pDeviceProperties`"] - pub fn zeDeviceGetProperties( - hDevice: ze_device_handle_t, - pDeviceProperties: *mut ze_device_properties_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Device compute properties queried using ::zeDeviceGetComputeProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_compute_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] Maximum items per compute group. 
(groupSizeX * groupSizeY *"] - #[doc = "< groupSizeZ) <= maxTotalGroupSize"] - pub maxTotalGroupSize: u32, - #[doc = "< [out] Maximum items for X dimension in group"] - pub maxGroupSizeX: u32, - #[doc = "< [out] Maximum items for Y dimension in group"] - pub maxGroupSizeY: u32, - #[doc = "< [out] Maximum items for Z dimension in group"] - pub maxGroupSizeZ: u32, - #[doc = "< [out] Maximum groups that can be launched for x dimension"] - pub maxGroupCountX: u32, - #[doc = "< [out] Maximum groups that can be launched for y dimension"] - pub maxGroupCountY: u32, - #[doc = "< [out] Maximum groups that can be launched for z dimension"] - pub maxGroupCountZ: u32, - #[doc = "< [out] Maximum shared local memory per group."] - pub maxSharedLocalMemory: u32, - #[doc = "< [out] Number of subgroup sizes supported. This indicates number of"] - #[doc = "< entries in subGroupSizes."] - pub numSubGroupSizes: u32, - #[doc = "< [out] Size group sizes supported."] - pub subGroupSizes: [u32; 8usize], -} -#[test] -fn bindgen_test_layout__ze_device_compute_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_compute_properties_t>(), - 88usize, - concat!("Size of: ", stringify!(_ze_device_compute_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_compute_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_compute_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxTotalGroupSize - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxTotalGroupSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxGroupSizeX as *const _ - as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxGroupSizeX) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxGroupSizeY as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxGroupSizeY) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxGroupSizeZ as *const _ - as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxGroupSizeZ) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxGroupCountX as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxGroupCountX) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxGroupCountY as *const _ - as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxGroupCountY) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxGroupCountZ as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxGroupCountZ) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).maxSharedLocalMemory - as *const _ as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(maxSharedLocalMemory) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).numSubGroupSizes as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(numSubGroupSizes) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_compute_properties_t>())).subGroupSizes as *const _ - as usize - }, - 52usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_compute_properties_t), - "::", - stringify!(subGroupSizes) - ) - ); -} -impl Default for _ze_device_compute_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves compute properties of the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clGetDeviceInfo"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pComputeProperties`"] - pub fn zeDeviceGetComputeProperties( - hDevice: ze_device_handle_t, - pComputeProperties: *mut ze_device_compute_properties_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Native kernel universal unique id (UUID)"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_native_kernel_uuid_t { - #[doc = "< [out] opaque data representing a native kernel UUID"] - pub id: [u8; 16usize], -} -#[test] -fn bindgen_test_layout__ze_native_kernel_uuid_t() { - assert_eq!( - ::std::mem::size_of::<_ze_native_kernel_uuid_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_native_kernel_uuid_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_native_kernel_uuid_t>(), - 1usize, - concat!("Alignment of ", stringify!(_ze_native_kernel_uuid_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_native_kernel_uuid_t>())).id as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_native_kernel_uuid_t), - "::", - stringify!(id) - ) - ); -} -impl _ze_device_module_flags_t { - #[doc = "< Device supports 16-bit floating-point operations"] - pub const ZE_DEVICE_MODULE_FLAG_FP16: _ze_device_module_flags_t = _ze_device_module_flags_t(1); -} -impl _ze_device_module_flags_t { - #[doc = "< Device supports 64-bit floating-point operations"] - pub const ZE_DEVICE_MODULE_FLAG_FP64: _ze_device_module_flags_t = _ze_device_module_flags_t(2); -} -impl _ze_device_module_flags_t { - #[doc = "< Device supports 64-bit atomic operations"] - pub const ZE_DEVICE_MODULE_FLAG_INT64_ATOMICS: _ze_device_module_flags_t = - _ze_device_module_flags_t(4); -} -impl _ze_device_module_flags_t { - #[doc 
= "< Device supports four component dot product and accumulate operations"] - pub const ZE_DEVICE_MODULE_FLAG_DP4A: _ze_device_module_flags_t = _ze_device_module_flags_t(8); -} -impl _ze_device_module_flags_t { - pub const ZE_DEVICE_MODULE_FLAG_FORCE_UINT32: _ze_device_module_flags_t = - _ze_device_module_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_module_flags_t> for _ze_device_module_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_module_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_module_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_module_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_module_flags_t> for _ze_device_module_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_module_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_module_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_module_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported device module flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_module_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported device module flags"] -pub use self::_ze_device_module_flags_t as ze_device_module_flags_t; -impl _ze_device_fp_flags_t { - #[doc = "< Supports denorms"] - pub const ZE_DEVICE_FP_FLAG_DENORM: _ze_device_fp_flags_t = _ze_device_fp_flags_t(1); -} -impl _ze_device_fp_flags_t { - #[doc = "< Supports INF and quiet NaNs"] - pub const ZE_DEVICE_FP_FLAG_INF_NAN: _ze_device_fp_flags_t = _ze_device_fp_flags_t(2); -} -impl _ze_device_fp_flags_t { - #[doc = "< Supports rounding to nearest even rounding mode"] - pub const ZE_DEVICE_FP_FLAG_ROUND_TO_NEAREST: _ze_device_fp_flags_t = _ze_device_fp_flags_t(4); -} -impl _ze_device_fp_flags_t { - #[doc = "< Supports rounding to zero."] - pub const ZE_DEVICE_FP_FLAG_ROUND_TO_ZERO: _ze_device_fp_flags_t = _ze_device_fp_flags_t(8); -} -impl _ze_device_fp_flags_t { - #[doc = "< Supports rounding to both positive and negative INF."] - pub const ZE_DEVICE_FP_FLAG_ROUND_TO_INF: _ze_device_fp_flags_t = _ze_device_fp_flags_t(16); -} -impl _ze_device_fp_flags_t { - #[doc = "< Supports IEEE754-2008 fused multiply-add."] - pub const ZE_DEVICE_FP_FLAG_FMA: _ze_device_fp_flags_t = _ze_device_fp_flags_t(32); -} -impl _ze_device_fp_flags_t { - #[doc = "< Supports rounding as defined by IEEE754 for divide and sqrt"] - #[doc = "< operations."] - pub const ZE_DEVICE_FP_FLAG_ROUNDED_DIVIDE_SQRT: _ze_device_fp_flags_t = - _ze_device_fp_flags_t(64); -} -impl _ze_device_fp_flags_t { - #[doc = "< Uses software implementation for basic floating-point operations."] - pub const ZE_DEVICE_FP_FLAG_SOFT_FLOAT: _ze_device_fp_flags_t = _ze_device_fp_flags_t(128); -} -impl _ze_device_fp_flags_t { - pub const ZE_DEVICE_FP_FLAG_FORCE_UINT32: _ze_device_fp_flags_t = - _ze_device_fp_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_fp_flags_t> for _ze_device_fp_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_fp_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_fp_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_fp_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_fp_flags_t> for _ze_device_fp_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> 
Self { - _ze_device_fp_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_fp_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_fp_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported floating-Point capability flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_fp_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported floating-Point capability flags"] -pub use self::_ze_device_fp_flags_t as ze_device_fp_flags_t; -#[doc = ""] -#[doc = " @brief Device module properties queried using ::zeDeviceGetModuleProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_module_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] Maximum supported SPIR-V version."] - #[doc = "< Returns zero if SPIR-V is not supported."] - #[doc = "< Contains major and minor attributes, use ::ZE_MAJOR_VERSION and ::ZE_MINOR_VERSION."] - pub spirvVersionSupported: u32, - #[doc = "< [out] 0 or a valid combination of ::ze_device_module_flags_t"] - pub flags: ze_device_module_flags_t, - #[doc = "< [out] Capabilities for half-precision floating-point operations."] - #[doc = "< returns 0 (if ::ZE_DEVICE_MODULE_FLAG_FP16 is not set) or a"] - #[doc = "< combination of ::ze_device_fp_flags_t."] - pub fp16flags: ze_device_fp_flags_t, - #[doc = "< [out] Capabilities for single-precision floating-point operations."] - #[doc = "< returns a combination of ::ze_device_fp_flags_t."] - pub fp32flags: ze_device_fp_flags_t, - #[doc = "< [out] Capabilities for double-precision floating-point operations."] - #[doc = "< returns 0 (if ::ZE_DEVICE_MODULE_FLAG_FP64 is not set) or a"] - #[doc = "< combination of ::ze_device_fp_flags_t."] - pub fp64flags: ze_device_fp_flags_t, - #[doc = "< [out] Maximum kernel argument size that is supported."] - pub maxArgumentsSize: u32, - #[doc = "< [out] Maximum size of internal buffer that holds output of printf"] - #[doc = "< calls from kernel."] - pub printfBufferSize: u32, - #[doc = "< [out] Compatibility UUID of supported native kernel."] - #[doc = "< UUID may or may not be the same across driver release, devices, or"] - #[doc = "< operating systems."] - #[doc = "< Application is responsible for ensuring UUID matches before creating"] - #[doc = "< module using"] - #[doc = "< previously created native kernel."] - pub nativeKernelSupported: ze_native_kernel_uuid_t, -} -#[test] -fn bindgen_test_layout__ze_device_module_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_module_properties_t>(), - 64usize, - concat!("Size of: ", stringify!(_ze_device_module_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_module_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_module_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_device_module_properties_t>())).spirvVersionSupported - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(spirvVersionSupported) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).flags as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).fp16flags as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(fp16flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).fp32flags as *const _ - as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(fp32flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).fp64flags as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(fp64flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).maxArgumentsSize as *const _ - as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(maxArgumentsSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).printfBufferSize as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(printfBufferSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_module_properties_t>())).nativeKernelSupported - as *const _ as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_module_properties_t), - "::", - stringify!(nativeKernelSupported) - ) - ); -} -impl Default for _ze_device_module_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves module properties of the device"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pModuleProperties`"] - pub fn zeDeviceGetModuleProperties( - hDevice: ze_device_handle_t, - pModuleProperties: *mut ze_device_module_properties_t, - ) -> ze_result_t; -} -impl _ze_command_queue_group_property_flags_t { - #[doc = "< Command queue group supports enqueing compute commands."] - pub const ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_COMPUTE: - _ze_command_queue_group_property_flags_t = _ze_command_queue_group_property_flags_t(1); -} -impl _ze_command_queue_group_property_flags_t { - #[doc = "< Command queue group supports enqueing copy commands."] - pub const ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_COPY: _ze_command_queue_group_property_flags_t = - _ze_command_queue_group_property_flags_t(2); -} -impl 
_ze_command_queue_group_property_flags_t { - #[doc = "< Command queue group supports cooperative kernels."] - #[doc = "< See ::zeCommandListAppendLaunchCooperativeKernel for more details."] - pub const ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_COOPERATIVE_KERNELS: - _ze_command_queue_group_property_flags_t = _ze_command_queue_group_property_flags_t(4); -} -impl _ze_command_queue_group_property_flags_t { - #[doc = "< Command queue groups supports metric queries."] - pub const ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_METRICS: - _ze_command_queue_group_property_flags_t = _ze_command_queue_group_property_flags_t(8); -} -impl _ze_command_queue_group_property_flags_t { - pub const ZE_COMMAND_QUEUE_GROUP_PROPERTY_FLAG_FORCE_UINT32: - _ze_command_queue_group_property_flags_t = - _ze_command_queue_group_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_command_queue_group_property_flags_t> - for _ze_command_queue_group_property_flags_t -{ - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_command_queue_group_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_command_queue_group_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_command_queue_group_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_command_queue_group_property_flags_t> - for _ze_command_queue_group_property_flags_t -{ - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_command_queue_group_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_command_queue_group_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_command_queue_group_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported command queue group property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_command_queue_group_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported command queue group property flags"] -pub use self::_ze_command_queue_group_property_flags_t as ze_command_queue_group_property_flags_t; -#[doc = ""] -#[doc = " @brief Command queue group properties queried using"] -#[doc = " ::zeDeviceGetCommandQueueGroupProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_group_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 (none) or a valid combination of"] - #[doc = "< ::ze_command_queue_group_property_flags_t"] - pub flags: ze_command_queue_group_property_flags_t, - #[doc = "< [out] maximum `pattern_size` supported by command queue group."] - #[doc = "< See ::zeCommandListAppendMemoryFill for more details."] - pub maxMemoryFillPatternSize: usize, - #[doc = "< [out] the number of physical engines within the group."] - pub numQueues: u32, -} -#[test] -fn bindgen_test_layout__ze_command_queue_group_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_queue_group_properties_t>(), - 40usize, - concat!( - "Size of: ", - stringify!(_ze_command_queue_group_properties_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_group_properties_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_queue_group_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_group_properties_t>())).stype as *const _ 
- as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_group_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_group_properties_t>())).pNext as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_group_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_group_properties_t>())).flags as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_group_properties_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_group_properties_t>())) - .maxMemoryFillPatternSize as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_group_properties_t), - "::", - stringify!(maxMemoryFillPatternSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_group_properties_t>())).numQueues as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_group_properties_t), - "::", - stringify!(numQueues) - ) - ); -} -impl Default for _ze_command_queue_group_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves command queue group properties of the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Properties are reported for each physical command queue type supported"] - #[doc = " by the device."] - #[doc = " - Multiple calls to this function will return properties in the same"] - #[doc = " order."] - #[doc = " - The order in which the properties are returned defines the command"] - #[doc = " queue group's ordinal."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkGetPhysicalDeviceQueueFamilyProperties**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDeviceGetCommandQueueGroupProperties( - hDevice: ze_device_handle_t, - pCount: *mut u32, - pCommandQueueGroupProperties: *mut ze_command_queue_group_properties_t, - ) -> ze_result_t; -} -impl _ze_device_memory_property_flags_t { - #[doc = "< reserved for future use"] - pub const ZE_DEVICE_MEMORY_PROPERTY_FLAG_TBD: _ze_device_memory_property_flags_t = - _ze_device_memory_property_flags_t(1); -} -impl _ze_device_memory_property_flags_t { - pub const ZE_DEVICE_MEMORY_PROPERTY_FLAG_FORCE_UINT32: _ze_device_memory_property_flags_t = - _ze_device_memory_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_memory_property_flags_t> for _ze_device_memory_property_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_memory_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_memory_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_memory_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_memory_property_flags_t> for 
_ze_device_memory_property_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_memory_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_memory_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_memory_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported device memory property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_memory_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported device memory property flags"] -pub use self::_ze_device_memory_property_flags_t as ze_device_memory_property_flags_t; -#[doc = ""] -#[doc = " @brief Device local memory properties queried using"] -#[doc = " ::zeDeviceGetMemoryProperties"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct _ze_device_memory_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 (none) or a valid combination of"] - #[doc = "< ::ze_device_memory_property_flags_t"] - pub flags: ze_device_memory_property_flags_t, - #[doc = "< [out] Maximum clock rate for device memory."] - pub maxClockRate: u32, - #[doc = "< [out] Maximum bus width between device and memory."] - pub maxBusWidth: u32, - #[doc = "< [out] Total memory size in bytes that is available to the device."] - pub totalSize: u64, - #[doc = "< [out] Memory name"] - pub name: [::std::os::raw::c_char; 256usize], -} -#[test] -fn bindgen_test_layout__ze_device_memory_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_memory_properties_t>(), - 296usize, - concat!("Size of: ", stringify!(_ze_device_memory_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_memory_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_memory_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).maxClockRate as *const _ - as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(maxClockRate) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).maxBusWidth as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(maxBusWidth) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).totalSize as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(totalSize) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_properties_t>())).name as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_properties_t), - "::", - stringify!(name) - ) - ); -} -impl Default for _ze_device_memory_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves local memory properties of the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Properties are reported for each physical memory type supported by the"] - #[doc = " device."] - #[doc = " - Multiple calls to this function will return properties in the same"] - #[doc = " order."] - #[doc = " - The order in which the properties are returned defines the device's"] - #[doc = " local memory ordinal."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clGetDeviceInfo"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDeviceGetMemoryProperties( - hDevice: ze_device_handle_t, - pCount: *mut u32, - pMemProperties: *mut ze_device_memory_properties_t, - ) -> ze_result_t; -} -impl _ze_memory_access_cap_flags_t { - #[doc = "< Supports load/store access"] - pub const ZE_MEMORY_ACCESS_CAP_FLAG_RW: _ze_memory_access_cap_flags_t = - _ze_memory_access_cap_flags_t(1); -} -impl _ze_memory_access_cap_flags_t { - #[doc = "< Supports atomic access"] - pub const ZE_MEMORY_ACCESS_CAP_FLAG_ATOMIC: _ze_memory_access_cap_flags_t = - _ze_memory_access_cap_flags_t(2); -} -impl _ze_memory_access_cap_flags_t { - #[doc = "< Supports concurrent access"] - pub const ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT: _ze_memory_access_cap_flags_t = - _ze_memory_access_cap_flags_t(4); -} -impl _ze_memory_access_cap_flags_t { - #[doc = "< Supports concurrent atomic access"] - pub const ZE_MEMORY_ACCESS_CAP_FLAG_CONCURRENT_ATOMIC: _ze_memory_access_cap_flags_t = - _ze_memory_access_cap_flags_t(8); -} -impl _ze_memory_access_cap_flags_t { - pub const ZE_MEMORY_ACCESS_CAP_FLAG_FORCE_UINT32: _ze_memory_access_cap_flags_t = - _ze_memory_access_cap_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_memory_access_cap_flags_t> for _ze_memory_access_cap_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_memory_access_cap_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_memory_access_cap_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_memory_access_cap_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_memory_access_cap_flags_t> for _ze_memory_access_cap_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_memory_access_cap_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_memory_access_cap_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_memory_access_cap_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Memory access capability flags"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - Supported access capabilities for different types of memory"] 
-#[doc = " allocations"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_memory_access_cap_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Memory access capability flags"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - Supported access capabilities for different types of memory"] -#[doc = " allocations"] -pub use self::_ze_memory_access_cap_flags_t as ze_memory_access_cap_flags_t; -#[doc = ""] -#[doc = " @brief Device memory access properties queried using"] -#[doc = " ::zeDeviceGetMemoryAccessProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_memory_access_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] host memory capabilities."] - #[doc = "< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flags_t."] - pub hostAllocCapabilities: ze_memory_access_cap_flags_t, - #[doc = "< [out] device memory capabilities."] - #[doc = "< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flags_t."] - pub deviceAllocCapabilities: ze_memory_access_cap_flags_t, - #[doc = "< [out] shared, single-device memory capabilities."] - #[doc = "< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flags_t."] - pub sharedSingleDeviceAllocCapabilities: ze_memory_access_cap_flags_t, - #[doc = "< [out] shared, cross-device memory capabilities."] - #[doc = "< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flags_t."] - pub sharedCrossDeviceAllocCapabilities: ze_memory_access_cap_flags_t, - #[doc = "< [out] shared, system memory capabilities."] - #[doc = "< returns 0 (unsupported) or a combination of ::ze_memory_access_cap_flags_t."] - pub sharedSystemAllocCapabilities: ze_memory_access_cap_flags_t, -} -#[test] -fn bindgen_test_layout__ze_device_memory_access_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_memory_access_properties_t>(), - 40usize, - concat!( - "Size of: ", - stringify!(_ze_device_memory_access_properties_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_memory_access_properties_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_memory_access_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())).stype as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())).pNext as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())).hostAllocCapabilities - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(hostAllocCapabilities) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())) - .deviceAllocCapabilities as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(deviceAllocCapabilities) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())) 
- .sharedSingleDeviceAllocCapabilities as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(sharedSingleDeviceAllocCapabilities) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())) - .sharedCrossDeviceAllocCapabilities as *const _ as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(sharedCrossDeviceAllocCapabilities) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_memory_access_properties_t>())) - .sharedSystemAllocCapabilities as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_memory_access_properties_t), - "::", - stringify!(sharedSystemAllocCapabilities) - ) - ); -} -impl Default for _ze_device_memory_access_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves memory access properties of the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clGetDeviceInfo"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pMemAccessProperties`"] - pub fn zeDeviceGetMemoryAccessProperties( - hDevice: ze_device_handle_t, - pMemAccessProperties: *mut ze_device_memory_access_properties_t, - ) -> ze_result_t; -} -impl _ze_device_cache_property_flags_t { - #[doc = "< Device support User Cache Control (i.e. 
SLM section vs Generic Cache)"] - pub const ZE_DEVICE_CACHE_PROPERTY_FLAG_USER_CONTROL: _ze_device_cache_property_flags_t = - _ze_device_cache_property_flags_t(1); -} -impl _ze_device_cache_property_flags_t { - pub const ZE_DEVICE_CACHE_PROPERTY_FLAG_FORCE_UINT32: _ze_device_cache_property_flags_t = - _ze_device_cache_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_cache_property_flags_t> for _ze_device_cache_property_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_cache_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_cache_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_cache_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_cache_property_flags_t> for _ze_device_cache_property_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_cache_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_cache_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_cache_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported cache control property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_cache_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported cache control property flags"] -pub use self::_ze_device_cache_property_flags_t as ze_device_cache_property_flags_t; -#[doc = ""] -#[doc = " @brief Device cache properties queried using ::zeDeviceGetCacheProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_cache_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 (none) or a valid combination of"] - #[doc = "< ::ze_device_cache_property_flags_t"] - pub flags: ze_device_cache_property_flags_t, - #[doc = "< [out] Per-cache size, in bytes"] - pub cacheSize: usize, -} -#[test] -fn bindgen_test_layout__ze_device_cache_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_cache_properties_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_device_cache_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_cache_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_cache_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_cache_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_cache_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_cache_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_cache_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_cache_properties_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_cache_properties_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_cache_properties_t>())).cacheSize as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_cache_properties_t), - "::", - stringify!(cacheSize) - ) - ); -} -impl Default for 
_ze_device_cache_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves cache properties of the device"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clGetDeviceInfo"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeDeviceGetCacheProperties( - hDevice: ze_device_handle_t, - pCount: *mut u32, - pCacheProperties: *mut ze_device_cache_properties_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Device image properties queried using ::zeDeviceGetImageProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_image_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] Maximum image dimensions for 1D resources. if 0, then 1D images"] - #[doc = "< are unsupported."] - pub maxImageDims1D: u32, - #[doc = "< [out] Maximum image dimensions for 2D resources. if 0, then 2D images"] - #[doc = "< are unsupported."] - pub maxImageDims2D: u32, - #[doc = "< [out] Maximum image dimensions for 3D resources. if 0, then 3D images"] - #[doc = "< are unsupported."] - pub maxImageDims3D: u32, - #[doc = "< [out] Maximum image buffer size in bytes. if 0, then buffer images are"] - #[doc = "< unsupported."] - pub maxImageBufferSize: u64, - #[doc = "< [out] Maximum image array slices. if 0, then image arrays are"] - #[doc = "< unsupported."] - pub maxImageArraySlices: u32, - #[doc = "< [out] Max samplers that can be used in kernel. if 0, then sampling is"] - #[doc = "< unsupported."] - pub maxSamplers: u32, - #[doc = "< [out] Returns the maximum number of simultaneous image objects that"] - #[doc = "< can be read from by a kernel. if 0, then reading images is"] - #[doc = "< unsupported."] - pub maxReadImageArgs: u32, - #[doc = "< [out] Returns the maximum number of simultaneous image objects that"] - #[doc = "< can be written to by a kernel. 
if 0, then writing images is"] - #[doc = "< unsupported."] - pub maxWriteImageArgs: u32, -} -#[test] -fn bindgen_test_layout__ze_device_image_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_image_properties_t>(), - 56usize, - concat!("Size of: ", stringify!(_ze_device_image_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_image_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_image_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxImageDims1D as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxImageDims1D) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxImageDims2D as *const _ - as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxImageDims2D) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxImageDims3D as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxImageDims3D) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxImageBufferSize as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxImageBufferSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxImageArraySlices - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxImageArraySlices) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxSamplers as *const _ - as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxSamplers) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxReadImageArgs as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxReadImageArgs) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_image_properties_t>())).maxWriteImageArgs as *const _ - as usize - }, - 52usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_image_properties_t), - "::", - stringify!(maxWriteImageArgs) - ) - ); -} -impl Default for _ze_device_image_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves image properties of the device"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - See ::zeImageGetProperties for format-specific capabilities."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should 
be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pImageProperties`"] - pub fn zeDeviceGetImageProperties( - hDevice: ze_device_handle_t, - pImageProperties: *mut ze_device_image_properties_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Device external memory import and export properties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_external_memory_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] Supported external memory import types for memory allocations."] - pub memoryAllocationImportTypes: ze_external_memory_type_flags_t, - #[doc = "< [out] Supported external memory export types for memory allocations."] - pub memoryAllocationExportTypes: ze_external_memory_type_flags_t, - #[doc = "< [out] Supported external memory import types for images."] - pub imageImportTypes: ze_external_memory_type_flags_t, - #[doc = "< [out] Supported external memory export types for images."] - pub imageExportTypes: ze_external_memory_type_flags_t, -} -#[test] -fn bindgen_test_layout__ze_device_external_memory_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_external_memory_properties_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_device_external_memory_properties_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_external_memory_properties_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_external_memory_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_external_memory_properties_t>())).stype as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_external_memory_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_external_memory_properties_t>())).pNext as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_external_memory_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_external_memory_properties_t>())) - .memoryAllocationImportTypes as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_external_memory_properties_t), - "::", - stringify!(memoryAllocationImportTypes) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_external_memory_properties_t>())) - .memoryAllocationExportTypes as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_external_memory_properties_t), - "::", - stringify!(memoryAllocationExportTypes) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_external_memory_properties_t>())).imageImportTypes - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_external_memory_properties_t), - "::", - stringify!(imageImportTypes) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_external_memory_properties_t>())).imageExportTypes - as *const _ as usize - }, - 28usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_device_external_memory_properties_t), - "::", - stringify!(imageExportTypes) - ) - ); -} -impl Default for _ze_device_external_memory_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves external memory import and export of the device"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pExternalMemoryProperties`"] - pub fn zeDeviceGetExternalMemoryProperties( - hDevice: ze_device_handle_t, - pExternalMemoryProperties: *mut ze_device_external_memory_properties_t, - ) -> ze_result_t; -} -impl _ze_device_p2p_property_flags_t { - #[doc = "< Device supports access between peer devices."] - pub const ZE_DEVICE_P2P_PROPERTY_FLAG_ACCESS: _ze_device_p2p_property_flags_t = - _ze_device_p2p_property_flags_t(1); -} -impl _ze_device_p2p_property_flags_t { - #[doc = "< Device supports atomics between peer devices."] - pub const ZE_DEVICE_P2P_PROPERTY_FLAG_ATOMICS: _ze_device_p2p_property_flags_t = - _ze_device_p2p_property_flags_t(2); -} -impl _ze_device_p2p_property_flags_t { - pub const ZE_DEVICE_P2P_PROPERTY_FLAG_FORCE_UINT32: _ze_device_p2p_property_flags_t = - _ze_device_p2p_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_p2p_property_flags_t> for _ze_device_p2p_property_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_p2p_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_p2p_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_p2p_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_p2p_property_flags_t> for _ze_device_p2p_property_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_p2p_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_p2p_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_p2p_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported device peer-to-peer property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_p2p_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported device peer-to-peer property flags"] -pub use self::_ze_device_p2p_property_flags_t as ze_device_p2p_property_flags_t; -#[doc = ""] -#[doc = " @brief Device peer-to-peer properties queried using"] -#[doc = " ::zeDeviceGetP2PProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_p2p_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 (none) or a valid combination of"] - #[doc = "< ::ze_device_p2p_property_flags_t"] - pub flags: ze_device_p2p_property_flags_t, -} -#[test] -fn bindgen_test_layout__ze_device_p2p_properties_t() { - assert_eq!( - 
::std::mem::size_of::<_ze_device_p2p_properties_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_device_p2p_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_p2p_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_p2p_properties_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_p2p_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_p2p_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_p2p_properties_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_p2p_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_p2p_properties_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_p2p_properties_t), - "::", - stringify!(flags) - ) - ); -} -impl Default for _ze_device_p2p_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves peer-to-peer properties between one device and a peer"] - #[doc = " devices"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " + `nullptr == hPeerDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pP2PProperties`"] - pub fn zeDeviceGetP2PProperties( - hDevice: ze_device_handle_t, - hPeerDevice: ze_device_handle_t, - pP2PProperties: *mut ze_device_p2p_properties_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Queries if one device can directly access peer device allocations"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Any device can access any other device within a node through a"] - #[doc = " scale-up fabric."] - #[doc = " - The following are conditions for CanAccessPeer query."] - #[doc = " + If both device and peer device are the same then return true."] - #[doc = " + If both sub-device and peer sub-device are the same then return"] - #[doc = " true."] - #[doc = " + If both are sub-devices and share the same parent device then"] - #[doc = " return true."] - #[doc = " + If both device and remote device are connected by a direct or"] - #[doc = " indirect scale-up fabric or over PCIe (same root complex or shared"] - #[doc = " PCIe switch) then true."] - #[doc = " + If both sub-device and remote parent device (and vice-versa) are"] - #[doc = " connected by a direct or indirect scale-up fabric or over PCIe"] - #[doc = " (same root complex or shared PCIe switch) then true."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " + `nullptr == hPeerDevice`"] - #[doc = " - 
::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == value`"] - pub fn zeDeviceCanAccessPeer( - hDevice: ze_device_handle_t, - hPeerDevice: ze_device_handle_t, - value: *mut ze_bool_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Returns current status of the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Once a device is reset, this call will update the OS handle attached"] - #[doc = " to the device handle."] - #[doc = " - The application may call this function from simultaneous threads with"] - #[doc = " the same device handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " + Device is available for use."] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " + Device is lost; must be reset for use."] - pub fn zeDeviceGetStatus(hDevice: ze_device_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Returns synchronized Host and device global timestamps."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads with"] - #[doc = " the same device handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == hostTimestamp`"] - #[doc = " + `nullptr == deviceTimestamp`"] - pub fn zeDeviceGetGlobalTimestamps( - hDevice: ze_device_handle_t, - hostTimestamp: *mut u64, - deviceTimestamp: *mut u64, - ) -> ze_result_t; -} -impl _ze_context_flags_t { - #[doc = "< reserved for future use"] - pub const ZE_CONTEXT_FLAG_TBD: _ze_context_flags_t = _ze_context_flags_t(1); -} -impl _ze_context_flags_t { - pub const ZE_CONTEXT_FLAG_FORCE_UINT32: _ze_context_flags_t = _ze_context_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_context_flags_t> for _ze_context_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_context_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_context_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_context_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_context_flags_t> for _ze_context_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_context_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_context_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_context_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported context creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_context_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported context creation flags"] -pub use self::_ze_context_flags_t as ze_context_flags_t; -#[doc = ""] -#[doc = " @brief Context descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: 
ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] creation flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_context_flags_t;"] - #[doc = "< default behavior may use implicit driver-based heuristics."] - pub flags: ze_context_flags_t, -} -#[test] -fn bindgen_test_layout__ze_context_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_context_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_context_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_context_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_context_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_context_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_desc_t), - "::", - stringify!(flags) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a context for the driver."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use the context for the driver which was"] - #[doc = " provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeContextCreate( - hDriver: ze_driver_handle_t, - desc: *const ze_context_desc_t, - phContext: *mut ze_context_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a context for the driver."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use the context for the driver which was"] - #[doc = " provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDriver`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phDevices) && (0 < 
numDevices)`"] - pub fn zeContextCreateEx( - hDriver: ze_driver_handle_t, - desc: *const ze_context_desc_t, - numDevices: u32, - phDevices: *mut ze_device_handle_t, - phContext: *mut ze_context_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys a context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the context before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same context handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeContextDestroy(hContext: ze_context_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Returns current status of the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads with"] - #[doc = " the same context handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " + Context is available for use."] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " + Context is invalid; due to device lost or reset."] - pub fn zeContextGetStatus(hContext: ze_context_handle_t) -> ze_result_t; -} -impl _ze_command_queue_flags_t { - #[doc = "< command queue should be optimized for submission to a single device engine."] - #[doc = "< driver **must** disable any implicit optimizations for distributing"] - #[doc = "< work across multiple engines."] - #[doc = "< this flag should be used when applications want full control over"] - #[doc = "< multi-engine submission and scheduling."] - pub const ZE_COMMAND_QUEUE_FLAG_EXPLICIT_ONLY: _ze_command_queue_flags_t = - _ze_command_queue_flags_t(1); -} -impl _ze_command_queue_flags_t { - pub const ZE_COMMAND_QUEUE_FLAG_FORCE_UINT32: _ze_command_queue_flags_t = - _ze_command_queue_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_command_queue_flags_t> for _ze_command_queue_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_command_queue_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_command_queue_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_command_queue_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_command_queue_flags_t> for _ze_command_queue_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_command_queue_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_command_queue_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_command_queue_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported command queue flags"] 
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_command_queue_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported command queue flags"] -pub use self::_ze_command_queue_flags_t as ze_command_queue_flags_t; -impl _ze_command_queue_mode_t { - #[doc = "< implicit default behavior; uses driver-based heuristics"] - pub const ZE_COMMAND_QUEUE_MODE_DEFAULT: _ze_command_queue_mode_t = _ze_command_queue_mode_t(0); -} -impl _ze_command_queue_mode_t { - #[doc = "< Device execution always completes immediately on execute;"] - #[doc = "< Host thread is blocked using wait on implicit synchronization object"] - pub const ZE_COMMAND_QUEUE_MODE_SYNCHRONOUS: _ze_command_queue_mode_t = - _ze_command_queue_mode_t(1); -} -impl _ze_command_queue_mode_t { - #[doc = "< Device execution is scheduled and will complete in future;"] - #[doc = "< explicit synchronization object must be used to determine completeness"] - pub const ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS: _ze_command_queue_mode_t = - _ze_command_queue_mode_t(2); -} -impl _ze_command_queue_mode_t { - pub const ZE_COMMAND_QUEUE_MODE_FORCE_UINT32: _ze_command_queue_mode_t = - _ze_command_queue_mode_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported command queue modes"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_command_queue_mode_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported command queue modes"] -pub use self::_ze_command_queue_mode_t as ze_command_queue_mode_t; -impl _ze_command_queue_priority_t { - #[doc = "< [default] normal priority"] - pub const ZE_COMMAND_QUEUE_PRIORITY_NORMAL: _ze_command_queue_priority_t = - _ze_command_queue_priority_t(0); -} -impl _ze_command_queue_priority_t { - #[doc = "< lower priority than normal"] - pub const ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_LOW: _ze_command_queue_priority_t = - _ze_command_queue_priority_t(1); -} -impl _ze_command_queue_priority_t { - #[doc = "< higher priority than normal"] - pub const ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_HIGH: _ze_command_queue_priority_t = - _ze_command_queue_priority_t(2); -} -impl _ze_command_queue_priority_t { - pub const ZE_COMMAND_QUEUE_PRIORITY_FORCE_UINT32: _ze_command_queue_priority_t = - _ze_command_queue_priority_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported command queue priorities"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_command_queue_priority_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported command queue priorities"] -pub use self::_ze_command_queue_priority_t as ze_command_queue_priority_t; -#[doc = ""] -#[doc = " @brief Command Queue descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] command queue group ordinal"] - pub ordinal: u32, - #[doc = "< [in] command queue index within the group;"] - #[doc = "< must be zero if ::ZE_COMMAND_QUEUE_FLAG_EXPLICIT_ONLY is not set"] - pub index: u32, - #[doc = "< [in] usage flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_command_queue_flags_t;"] - #[doc = "< default behavior may use implicit driver-based heuristics to balance"] - #[doc = "< latency and throughput."] - pub flags: ze_command_queue_flags_t, - #[doc = "< [in] operation mode"] - pub mode: 
ze_command_queue_mode_t, - #[doc = "< [in] priority"] - pub priority: ze_command_queue_priority_t, -} -#[test] -fn bindgen_test_layout__ze_command_queue_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_queue_desc_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_command_queue_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_command_queue_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).ordinal as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(ordinal) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).index as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(index) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).flags as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).mode as *const _ as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(mode) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_desc_t>())).priority as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_desc_t), - "::", - stringify!(priority) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a command queue on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - A command queue represents a logical input stream to the device, tied"] - #[doc = " to a physical input stream."] - #[doc = " - The application must only use the command queue for the device, or its"] - #[doc = " sub-devices, which was provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clCreateCommandQueue**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phCommandQueue`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < desc->flags`"] - #[doc = " + `::ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS < desc->mode`"] - #[doc = " + `::ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_HIGH < desc->priority`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeCommandQueueCreate( - 
hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - desc: *const ze_command_queue_desc_t, - phCommandQueue: *mut ze_command_queue_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys a command queue."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must destroy all fence handles created from the"] - #[doc = " command queue before destroying the command queue itself"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the command queue before it is deleted"] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this command queue"] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command queue handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clReleaseCommandQueue**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandQueue`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeCommandQueueDestroy(hCommandQueue: ze_command_queue_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Executes a command list in a command queue."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The command lists are submitted to the device in the order they are"] - #[doc = " received, whether from multiple calls (on the same or different"] - #[doc = " threads) or a single call with multiple command lists."] - #[doc = " - The application must ensure the command lists are accessible by the"] - #[doc = " device on which the command queue was created."] - #[doc = " - The application must ensure the command lists are not currently"] - #[doc = " referencing the command list since the implementation is allowed to"] - #[doc = " modify the contents of the command list for submission."] - #[doc = " - The application must only execute command lists created with an"] - #[doc = " identical command queue group ordinal to the command queue."] - #[doc = " - The application must use a fence created using the same command queue."] - #[doc = " - The application must ensure the command queue, command list and fence"] - #[doc = " were created on the same context."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - vkQueueSubmit"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandQueue`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == phCommandLists`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `0 == numCommandLists`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_COMMAND_LIST_TYPE"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeCommandQueueExecuteCommandLists( - hCommandQueue: ze_command_queue_handle_t, - numCommandLists: u32, - phCommandLists: *mut ze_command_list_handle_t, 
- hFence: ze_fence_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Synchronizes a command queue by waiting on the host."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandQueue`"] - #[doc = " - ::ZE_RESULT_NOT_READY"] - #[doc = " + timeout expired"] - pub fn zeCommandQueueSynchronize( - hCommandQueue: ze_command_queue_handle_t, - timeout: u64, - ) -> ze_result_t; -} -impl _ze_command_list_flags_t { - #[doc = "< driver may reorder commands (e.g., kernels, copies) between barriers"] - #[doc = "< and synchronization primitives."] - #[doc = "< using this flag may increase Host overhead of ::zeCommandListClose."] - #[doc = "< therefore, this flag should **not** be set for low-latency usage-models."] - pub const ZE_COMMAND_LIST_FLAG_RELAXED_ORDERING: _ze_command_list_flags_t = - _ze_command_list_flags_t(1); -} -impl _ze_command_list_flags_t { - #[doc = "< driver may perform additional optimizations that increase execution"] - #[doc = "< throughput."] - #[doc = "< using this flag may increase Host overhead of ::zeCommandListClose and ::zeCommandQueueExecuteCommandLists."] - #[doc = "< therefore, this flag should **not** be set for low-latency usage-models."] - pub const ZE_COMMAND_LIST_FLAG_MAXIMIZE_THROUGHPUT: _ze_command_list_flags_t = - _ze_command_list_flags_t(2); -} -impl _ze_command_list_flags_t { - #[doc = "< command list should be optimized for submission to a single command"] - #[doc = "< queue and device engine."] - #[doc = "< driver **must** disable any implicit optimizations for distributing"] - #[doc = "< work across multiple engines."] - #[doc = "< this flag should be used when applications want full control over"] - #[doc = "< multi-engine submission and scheduling."] - pub const ZE_COMMAND_LIST_FLAG_EXPLICIT_ONLY: _ze_command_list_flags_t = - _ze_command_list_flags_t(4); -} -impl _ze_command_list_flags_t { - pub const ZE_COMMAND_LIST_FLAG_FORCE_UINT32: _ze_command_list_flags_t = - _ze_command_list_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_command_list_flags_t> for _ze_command_list_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_command_list_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_command_list_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_command_list_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_command_list_flags_t> for _ze_command_list_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_command_list_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_command_list_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_command_list_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported command list creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_command_list_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported command list creation flags"] -pub use self::_ze_command_list_flags_t as ze_command_list_flags_t; -#[doc = ""] -#[doc = " @brief Command List descriptor"] -#[repr(C)] 
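// Illustrative sketch only, not part of this change: one plausible way the removed
// command-queue bindings above were driven. A context, a device and an already-closed
// command list are assumed to come from elsewhere; descriptors are zero-initialized so
// stype/pNext/flags stay at their documented defaults; zeroed handles stand in for
// "no fence"; error handling is elided; the helper name `submit_and_wait` is hypothetical.
fn submit_and_wait(
    ctx: ze_context_handle_t,
    dev: ze_device_handle_t,
    cmd_list: ze_command_list_handle_t,
) -> ze_result_t {
    unsafe {
        // Describe and create the queue: ordinal 0, default mode, normal priority.
        let mut queue_desc: ze_command_queue_desc_t = std::mem::zeroed();
        queue_desc.ordinal = 0;
        queue_desc.mode = ze_command_queue_mode_t::ZE_COMMAND_QUEUE_MODE_DEFAULT;
        queue_desc.priority = ze_command_queue_priority_t::ZE_COMMAND_QUEUE_PRIORITY_NORMAL;
        let mut queue: ze_command_queue_handle_t = std::mem::zeroed();
        zeCommandQueueCreate(ctx, dev, &queue_desc, &mut queue);

        // Submit the closed command list; the zeroed fence handle means "no fence".
        let mut lists = [cmd_list];
        zeCommandQueueExecuteCommandLists(queue, 1, lists.as_mut_ptr(), std::mem::zeroed());

        // Block on the host until the queue drains, then release the queue.
        let result = zeCommandQueueSynchronize(queue, u64::MAX);
        zeCommandQueueDestroy(queue);
        result
    }
}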
-#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] command queue group ordinal to which this command list will be"] - #[doc = "< submitted"] - pub commandQueueGroupOrdinal: u32, - #[doc = "< [in] usage flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_command_list_flags_t;"] - #[doc = "< default behavior may use implicit driver-based heuristics to balance"] - #[doc = "< latency and throughput."] - pub flags: ze_command_list_flags_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_command_list_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_command_list_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_list_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_list_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_desc_t>())).commandQueueGroupOrdinal as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_desc_t), - "::", - stringify!(commandQueueGroupOrdinal) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_command_list_desc_t>())).flags as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_desc_t), - "::", - stringify!(flags) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a command list on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - A command list represents a sequence of commands for execution on a"] - #[doc = " command queue."] - #[doc = " - The command list is created in the 'open' state."] - #[doc = " - The application must only use the command list for the device, or its"] - #[doc = " sub-devices, which was provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x7 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeCommandListCreate( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - desc: *const ze_command_list_desc_t, - phCommandList: *mut ze_command_list_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates an immediate command list on the context."] - #[doc = ""] - #[doc 
= " @details"] - #[doc = " - An immediate command list is used for low-latency submission of"] - #[doc = " commands."] - #[doc = " - An immediate command list creates an implicit command queue."] - #[doc = " - The command list is created in the 'open' state and never needs to be"] - #[doc = " closed."] - #[doc = " - The application must only use the command list for the device, or its"] - #[doc = " sub-devices, which was provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == altdesc`"] - #[doc = " + `nullptr == phCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < altdesc->flags`"] - #[doc = " + `::ZE_COMMAND_QUEUE_MODE_ASYNCHRONOUS < altdesc->mode`"] - #[doc = " + `::ZE_COMMAND_QUEUE_PRIORITY_PRIORITY_HIGH < altdesc->priority`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeCommandListCreateImmediate( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - altdesc: *const ze_command_queue_desc_t, - phCommandList: *mut ze_command_list_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys a command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the command list before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this command list."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeCommandListDestroy(hCommandList: ze_command_list_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Closes a command list; ready to be executed by a command queue."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - pub fn zeCommandListClose(hCommandList: ze_command_list_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Reset a command list to initial (empty) state; ready for appending"] - #[doc = " commands."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must 
ensure the device is not currently referencing"] - #[doc = " the command list before it is reset"] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - pub fn zeCommandListReset(hCommandList: ze_command_list_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Appends a memory write of the device's global timestamp value into a"] - #[doc = " command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The timestamp frequency can be queried from"] - #[doc = " ::ze_device_properties_t.timerResolution."] - #[doc = " - The number of valid bits in the timestamp value can be queried from"] - #[doc = " ::ze_device_properties_t.timestampValidBits."] - #[doc = " - The application must ensure the memory pointed to by dstptr is"] - #[doc = " accessible by the device on which the command list was created."] - #[doc = " - The application must ensure the command list and events were created,"] - #[doc = " and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendWriteGlobalTimestamp( - hCommandList: ze_command_list_handle_t, - dstptr: *mut u64, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Appends an execution and global memory barrier into a command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - If numWaitEvents is zero, then all previous commands are completed"] - #[doc = " prior to the execution of the barrier."] - #[doc = " - If numWaitEvents is non-zero, then then all phWaitEvents must be"] - #[doc = " signaled prior to the execution of the barrier."] - #[doc = " - This command blocks all following commands from beginning until the"] - #[doc = " execution of the barrier completes."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkCmdPipelineBarrier**"] - #[doc = 
" - clEnqueueBarrierWithWaitList"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendBarrier( - hCommandList: ze_command_list_handle_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Appends a global memory ranges barrier into a command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - If numWaitEvents is zero, then all previous commands are completed"] - #[doc = " prior to the execution of the barrier."] - #[doc = " - If numWaitEvents is non-zero, then then all phWaitEvents must be"] - #[doc = " signaled prior to the execution of the barrier."] - #[doc = " - This command blocks all following commands from beginning until the"] - #[doc = " execution of the barrier completes."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pRangeSizes`"] - #[doc = " + `nullptr == pRanges`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendMemoryRangesBarrier( - hCommandList: ze_command_list_handle_t, - numRanges: u32, - pRangeSizes: *const usize, - pRanges: *mut *const ::std::os::raw::c_void, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Ensures in-bound writes to the device are globally observable."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - This is a special-case system level barrier that can be used to ensure"] - #[doc = " global observability of writes;"] - #[doc = " typically needed after a producer (e.g., NIC) performs direct writes"] - #[doc = " to the device's memory (e.g., Direct RDMA writes)."] - #[doc = " This is typically required when the memory corresponding to the writes"] - #[doc = " is subsequently accessed from a remote device."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - pub fn zeContextSystemBarrier( - hContext: 
ze_context_handle_t, - hDevice: ze_device_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies host, device, or shared memory."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the memory pointed to by dstptr and srcptr"] - #[doc = " is accessible by the device on which the command list was created."] - #[doc = " - The implementation must not access the memory pointed to by dstptr and"] - #[doc = " srcptr as they are free to be modified by either the Host or device up"] - #[doc = " until execution."] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must ensure the command list and events were created,"] - #[doc = " and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clEnqueueCopyBuffer**"] - #[doc = " - **clEnqueueReadBuffer**"] - #[doc = " - **clEnqueueWriteBuffer**"] - #[doc = " - **clEnqueueSVMMemcpy**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " + `nullptr == srcptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendMemoryCopy( - hCommandList: ze_command_list_handle_t, - dstptr: *mut ::std::os::raw::c_void, - srcptr: *const ::std::os::raw::c_void, - size: usize, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Initializes host, device, or shared memory."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the memory pointed to by dstptr is"] - #[doc = " accessible by the device on which the command list was created."] - #[doc = " - The implementation must not access the memory pointed to by dstptr as"] - #[doc = " it is free to be modified by either the Host or device up until"] - #[doc = " execution."] - #[doc = " - The value to initialize memory to is described by the pattern and the"] - #[doc = " pattern size."] - #[doc = " - The pattern size must be a power-of-two and less than"] - #[doc = " ::ze_command_queue_group_properties_t.maxMemoryFillPatternSize."] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must enusre the command list and events were created,"] - #[doc = " and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clEnqueueFillBuffer**"] - #[doc = " - **clEnqueueSVMMemFill**"] - #[doc = ""] 
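// Illustrative sketch only, not part of this change: recording the fill, barrier and copy
// commands described here into a freshly created command list. Context and device handles
// are assumed, `record_fill_then_copy` is a hypothetical helper name, zeroed event handles
// stand in for "no signal event", and error handling is elided.
fn record_fill_then_copy(
    ctx: ze_context_handle_t,
    dev: ze_device_handle_t,
    dst: *mut ::std::os::raw::c_void,
    src: *const ::std::os::raw::c_void,
    len: usize,
) -> ze_command_list_handle_t {
    unsafe {
        // Zero-initialized descriptor: command queue group ordinal 0, default flags.
        let desc: ze_command_list_desc_t = std::mem::zeroed();
        let mut list: ze_command_list_handle_t = std::mem::zeroed();
        zeCommandListCreate(ctx, dev, &desc, &mut list);

        // Fill the destination with a one-byte zero pattern (pattern size must be a power of two).
        let zero: u8 = 0;
        zeCommandListAppendMemoryFill(
            list,
            dst,
            &zero as *const u8 as *const ::std::os::raw::c_void,
            1,
            len,
            std::mem::zeroed(),
            0,
            std::ptr::null_mut(),
        );
        // Order the fill before the copy.
        zeCommandListAppendBarrier(list, std::mem::zeroed(), 0, std::ptr::null_mut());
        zeCommandListAppendMemoryCopy(list, dst, src, len, std::mem::zeroed(), 0, std::ptr::null_mut());

        // A list must be closed before a queue may execute it.
        zeCommandListClose(list);
        list
    }
}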
- #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " + `nullptr == pattern`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendMemoryFill( - hCommandList: ze_command_list_handle_t, - ptr: *mut ::std::os::raw::c_void, - pattern: *const ::std::os::raw::c_void, - pattern_size: usize, - size: usize, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Copy region descriptor"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_copy_region_t { - #[doc = "< [in] The origin x offset for region in bytes"] - pub originX: u32, - #[doc = "< [in] The origin y offset for region in rows"] - pub originY: u32, - #[doc = "< [in] The origin z offset for region in slices"] - pub originZ: u32, - #[doc = "< [in] The region width relative to origin in bytes"] - pub width: u32, - #[doc = "< [in] The region height relative to origin in rows"] - pub height: u32, - #[doc = "< [in] The region depth relative to origin in slices. Set this to 0 for"] - #[doc = "< 2D copy."] - pub depth: u32, -} -#[test] -fn bindgen_test_layout__ze_copy_region_t() { - assert_eq!( - ::std::mem::size_of::<_ze_copy_region_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_copy_region_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_copy_region_t>(), - 4usize, - concat!("Alignment of ", stringify!(_ze_copy_region_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_copy_region_t>())).originX as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_copy_region_t), - "::", - stringify!(originX) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_copy_region_t>())).originY as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(_ze_copy_region_t), - "::", - stringify!(originY) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_copy_region_t>())).originZ as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_copy_region_t), - "::", - stringify!(originZ) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_copy_region_t>())).width as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(_ze_copy_region_t), - "::", - stringify!(width) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_copy_region_t>())).height as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_copy_region_t), - "::", - stringify!(height) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_copy_region_t>())).depth as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_copy_region_t), - "::", - stringify!(depth) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies a region from a 2D or 3D array of host, device, or shared"] - #[doc = " memory."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the memory pointed to by dstptr and srcptr"] - #[doc = " is accessible by the device on which the command list was created."] - #[doc = " - The implementation 
must not access the memory pointed to by dstptr and"] - #[doc = " srcptr as they are free to be modified by either the Host or device up"] - #[doc = " until execution."] - #[doc = " - The region width, height, and depth for both src and dst must be same."] - #[doc = " The origins can be different."] - #[doc = " - The src and dst regions cannot be overlapping."] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must ensure the command list and events were created,"] - #[doc = " and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " + `nullptr == dstRegion`"] - #[doc = " + `nullptr == srcptr`"] - #[doc = " + `nullptr == srcRegion`"] - #[doc = " - ::ZE_RESULT_ERROR_OVERLAPPING_REGIONS"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendMemoryCopyRegion( - hCommandList: ze_command_list_handle_t, - dstptr: *mut ::std::os::raw::c_void, - dstRegion: *const ze_copy_region_t, - dstPitch: u32, - dstSlicePitch: u32, - srcptr: *const ::std::os::raw::c_void, - srcRegion: *const ze_copy_region_t, - srcPitch: u32, - srcSlicePitch: u32, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies host, device, or shared memory from another context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The current active and source context must be from the same driver."] - #[doc = " - The application must ensure the memory pointed to by dstptr and srcptr"] - #[doc = " is accessible by the device on which the command list was created."] - #[doc = " - The implementation must not access the memory pointed to by dstptr and"] - #[doc = " srcptr as they are free to be modified by either the Host or device up"] - #[doc = " until execution."] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must ensure the command list and events were created,"] - #[doc = " and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hContextSrc`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " + `nullptr == srcptr`"] - #[doc = " - 
::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendMemoryCopyFromContext( - hCommandList: ze_command_list_handle_t, - dstptr: *mut ::std::os::raw::c_void, - hContextSrc: ze_context_handle_t, - srcptr: *const ::std::os::raw::c_void, - size: usize, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies an image."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the image and events are accessible by the"] - #[doc = " device on which the command list was created."] - #[doc = " - The application must ensure the image format descriptors for both"] - #[doc = " source and destination images are the same."] - #[doc = " - The application must ensure the command list, images and events were"] - #[doc = " created on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clEnqueueCopyImage**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hDstImage`"] - #[doc = " + `nullptr == hSrcImage`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendImageCopy( - hCommandList: ze_command_list_handle_t, - hDstImage: ze_image_handle_t, - hSrcImage: ze_image_handle_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Region descriptor"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_image_region_t { - #[doc = "< [in] The origin x offset for region in pixels"] - pub originX: u32, - #[doc = "< [in] The origin y offset for region in pixels"] - pub originY: u32, - #[doc = "< [in] The origin z offset for region in pixels"] - pub originZ: u32, - #[doc = "< [in] The region width relative to origin in pixels"] - pub width: u32, - #[doc = "< [in] The region height relative to origin in pixels"] - pub height: u32, - #[doc = "< [in] The region depth relative to origin. 
For 1D or 2D images, set"] - #[doc = "< this to 1."] - pub depth: u32, -} -#[test] -fn bindgen_test_layout__ze_image_region_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_region_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_image_region_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_region_t>(), - 4usize, - concat!("Alignment of ", stringify!(_ze_image_region_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_region_t>())).originX as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_region_t), - "::", - stringify!(originX) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_region_t>())).originY as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_region_t), - "::", - stringify!(originY) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_region_t>())).originZ as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_region_t), - "::", - stringify!(originZ) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_region_t>())).width as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_region_t), - "::", - stringify!(width) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_region_t>())).height as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_region_t), - "::", - stringify!(height) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_region_t>())).depth as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_region_t), - "::", - stringify!(depth) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies a region of an image to another image."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the image and events are accessible by the"] - #[doc = " device on which the command list was created."] - #[doc = " - The region width and height for both src and dst must be same. 
The"] - #[doc = " origins can be different."] - #[doc = " - The src and dst regions cannot be overlapping."] - #[doc = " - The application must ensure the image format descriptors for both"] - #[doc = " source and destination images are the same."] - #[doc = " - The application must ensure the command list, images and events were"] - #[doc = " created, and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hDstImage`"] - #[doc = " + `nullptr == hSrcImage`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_OVERLAPPING_REGIONS"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendImageCopyRegion( - hCommandList: ze_command_list_handle_t, - hDstImage: ze_image_handle_t, - hSrcImage: ze_image_handle_t, - pDstRegion: *const ze_image_region_t, - pSrcRegion: *const ze_image_region_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies from an image to device or shared memory."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the memory pointed to by dstptr is"] - #[doc = " accessible by the device on which the command list was created."] - #[doc = " - The implementation must not access the memory pointed to by dstptr as"] - #[doc = " it is free to be modified by either the Host or device up until"] - #[doc = " execution."] - #[doc = " - The application must ensure the image and events are accessible by the"] - #[doc = " device on which the command list was created."] - #[doc = " - The application must ensure the image format descriptor for the source"] - #[doc = " image is not a media format."] - #[doc = " - The application must ensure the command list, image and events were"] - #[doc = " created, and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clEnqueueReadImage"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hSrcImage`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendImageCopyToMemory( - hCommandList: ze_command_list_handle_t, - dstptr: *mut ::std::os::raw::c_void, - hSrcImage: ze_image_handle_t, - pSrcRegion: *const ze_image_region_t, - 
hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Copies to an image from device or shared memory."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the memory pointed to by srcptr is"] - #[doc = " accessible by the device on which the command list was created."] - #[doc = " - The implementation must not access the memory pointed to by srcptr as"] - #[doc = " it is free to be modified by either the Host or device up until"] - #[doc = " execution."] - #[doc = " - The application must ensure the image and events are accessible by the"] - #[doc = " device on which the command list was created."] - #[doc = " - The application must ensure the image format descriptor for the"] - #[doc = " destination image is not a media format."] - #[doc = " - The application must ensure the command list, image and events were"] - #[doc = " created, and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clEnqueueWriteImage"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hDstImage`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == srcptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendImageCopyFromMemory( - hCommandList: ze_command_list_handle_t, - hDstImage: ze_image_handle_t, - srcptr: *const ::std::os::raw::c_void, - pDstRegion: *const ze_image_region_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Asynchronously prefetches shared memory to the device associated with"] - #[doc = " the specified command list"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - This is a hint to improve performance only and is not required for"] - #[doc = " correctness."] - #[doc = " - Only prefetching to the device associated with the specified command"] - #[doc = " list is supported."] - #[doc = " Prefetching to the host or to a peer device is not supported."] - #[doc = " - Prefetching may not be supported for all allocation types for all devices."] - #[doc = " If memory prefetching is not supported for the specified memory range"] - #[doc = " the prefetch hint may be ignored."] - #[doc = " - Prefetching may only be supported at a device-specific granularity,"] - #[doc = " such as at a page boundary."] - #[doc = " In this case, the memory range may be expanded such that the start and"] - #[doc = " end of the range satisfy granularity requirements."] - #[doc = " - The application must ensure the memory pointed to by ptr is accessible"] - #[doc = " by the device on which the command list was created."] - #[doc = " - The application must ensure the command list was created, and the"] - #[doc = " memory was allocated, on the same context."] - #[doc = " - 
The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clEnqueueSVMMigrateMem"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - pub fn zeCommandListAppendMemoryPrefetch( - hCommandList: ze_command_list_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - ) -> ze_result_t; -} -impl _ze_memory_advice_t { - #[doc = "< hint that memory will be read from frequently and written to rarely"] - pub const ZE_MEMORY_ADVICE_SET_READ_MOSTLY: _ze_memory_advice_t = _ze_memory_advice_t(0); -} -impl _ze_memory_advice_t { - #[doc = "< removes the affect of ::ZE_MEMORY_ADVICE_SET_READ_MOSTLY"] - pub const ZE_MEMORY_ADVICE_CLEAR_READ_MOSTLY: _ze_memory_advice_t = _ze_memory_advice_t(1); -} -impl _ze_memory_advice_t { - #[doc = "< hint that the preferred memory location is the specified device"] - pub const ZE_MEMORY_ADVICE_SET_PREFERRED_LOCATION: _ze_memory_advice_t = _ze_memory_advice_t(2); -} -impl _ze_memory_advice_t { - #[doc = "< removes the affect of ::ZE_MEMORY_ADVICE_SET_PREFERRED_LOCATION"] - pub const ZE_MEMORY_ADVICE_CLEAR_PREFERRED_LOCATION: _ze_memory_advice_t = - _ze_memory_advice_t(3); -} -impl _ze_memory_advice_t { - #[doc = "< hints that memory will mostly be accessed non-atomically"] - pub const ZE_MEMORY_ADVICE_SET_NON_ATOMIC_MOSTLY: _ze_memory_advice_t = _ze_memory_advice_t(4); -} -impl _ze_memory_advice_t { - #[doc = "< removes the affect of ::ZE_MEMORY_ADVICE_SET_NON_ATOMIC_MOSTLY"] - pub const ZE_MEMORY_ADVICE_CLEAR_NON_ATOMIC_MOSTLY: _ze_memory_advice_t = - _ze_memory_advice_t(5); -} -impl _ze_memory_advice_t { - #[doc = "< hints that memory should be cached"] - pub const ZE_MEMORY_ADVICE_BIAS_CACHED: _ze_memory_advice_t = _ze_memory_advice_t(6); -} -impl _ze_memory_advice_t { - #[doc = "< hints that memory should be not be cached"] - pub const ZE_MEMORY_ADVICE_BIAS_UNCACHED: _ze_memory_advice_t = _ze_memory_advice_t(7); -} -impl _ze_memory_advice_t { - pub const ZE_MEMORY_ADVICE_FORCE_UINT32: _ze_memory_advice_t = _ze_memory_advice_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported memory advice hints"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_memory_advice_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported memory advice hints"] -pub use self::_ze_memory_advice_t as ze_memory_advice_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Provides advice about the use of a shared memory range"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Memory advice is a performance hint only and is not required for"] - #[doc = " functional correctness."] - #[doc = " - Memory advice can be used to override driver heuristics to explicitly"] - #[doc = " control shared memory behavior."] - #[doc = " - Not all memory advice hints may be supported for all allocation types"] - #[doc = " for all devices."] - #[doc = " If a memory advice hint is not supported by the device it will be ignored."] - #[doc = " - Memory advice may only be supported at a device-specific granularity,"] - #[doc = " such as at a 
page boundary."] - #[doc = " In this case, the memory range may be expanded such that the start and"] - #[doc = " end of the range satisfy granularity requirements."] - #[doc = " - The application must ensure the memory pointed to by ptr is accessible"] - #[doc = " by the device on which the command list was created."] - #[doc = " - The application must ensure the command list was created, and memory"] - #[doc = " was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle, and the memory was"] - #[doc = " allocated."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `::ZE_MEMORY_ADVICE_BIAS_UNCACHED < advice`"] - pub fn zeCommandListAppendMemAdvise( - hCommandList: ze_command_list_handle_t, - hDevice: ze_device_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - advice: ze_memory_advice_t, - ) -> ze_result_t; -} -impl _ze_event_pool_flags_t { - #[doc = "< signals and waits are also visible to host"] - pub const ZE_EVENT_POOL_FLAG_HOST_VISIBLE: _ze_event_pool_flags_t = _ze_event_pool_flags_t(1); -} -impl _ze_event_pool_flags_t { - #[doc = "< signals and waits may be shared across processes"] - pub const ZE_EVENT_POOL_FLAG_IPC: _ze_event_pool_flags_t = _ze_event_pool_flags_t(2); -} -impl _ze_event_pool_flags_t { - #[doc = "< Indicates all events in pool will contain kernel timestamps; cannot be"] - #[doc = "< combined with ::ZE_EVENT_POOL_FLAG_IPC"] - pub const ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP: _ze_event_pool_flags_t = - _ze_event_pool_flags_t(4); -} -impl _ze_event_pool_flags_t { - pub const ZE_EVENT_POOL_FLAG_FORCE_UINT32: _ze_event_pool_flags_t = - _ze_event_pool_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_event_pool_flags_t> for _ze_event_pool_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_event_pool_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_event_pool_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_event_pool_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_event_pool_flags_t> for _ze_event_pool_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_event_pool_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_event_pool_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_event_pool_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported event pool creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_event_pool_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported event pool creation flags"] -pub use self::_ze_event_pool_flags_t as ze_event_pool_flags_t; -#[doc = ""] -#[doc = " @brief Event pool descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific 
structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] creation flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_event_pool_flags_t;"] - #[doc = "< default behavior is signals and waits are visible to the entire device"] - #[doc = "< and peer devices."] - pub flags: ze_event_pool_flags_t, - #[doc = "< [in] number of events within the pool; must be greater than 0"] - pub count: u32, -} -#[test] -fn bindgen_test_layout__ze_event_pool_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_pool_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_event_pool_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_pool_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_pool_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_pool_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_pool_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_desc_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_pool_desc_t>())).count as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_desc_t), - "::", - stringify!(count) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a pool of events on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use events within the pool for the"] - #[doc = " device(s), or their sub-devices, which were provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phEventPool`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x7 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `0 == desc->count`"] - #[doc = " + `(nullptr == phDevices) && (0 < numDevices)`"] - pub fn zeEventPoolCreate( - hContext: ze_context_handle_t, - desc: *const ze_event_pool_desc_t, - numDevices: u32, - phDevices: *mut ze_device_handle_t, - phEventPool: *mut ze_event_pool_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Deletes an event pool object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must destroy all event handles created from the pool"] - #[doc = " before destroying the pool itself."] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the any event within the pool before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - 
#[doc = " Device allocations associated with this event pool."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same event pool handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEventPool`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeEventPoolDestroy(hEventPool: ze_event_pool_handle_t) -> ze_result_t; -} -impl _ze_event_scope_flags_t { - #[doc = "< cache hierarchies are flushed or invalidated sufficient for local"] - #[doc = "< sub-device access"] - pub const ZE_EVENT_SCOPE_FLAG_SUBDEVICE: _ze_event_scope_flags_t = _ze_event_scope_flags_t(1); -} -impl _ze_event_scope_flags_t { - #[doc = "< cache hierarchies are flushed or invalidated sufficient for global"] - #[doc = "< device access and peer device access"] - pub const ZE_EVENT_SCOPE_FLAG_DEVICE: _ze_event_scope_flags_t = _ze_event_scope_flags_t(2); -} -impl _ze_event_scope_flags_t { - #[doc = "< cache hierarchies are flushed or invalidated sufficient for device and"] - #[doc = "< host access"] - pub const ZE_EVENT_SCOPE_FLAG_HOST: _ze_event_scope_flags_t = _ze_event_scope_flags_t(4); -} -impl _ze_event_scope_flags_t { - pub const ZE_EVENT_SCOPE_FLAG_FORCE_UINT32: _ze_event_scope_flags_t = - _ze_event_scope_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_event_scope_flags_t> for _ze_event_scope_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_event_scope_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_event_scope_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_event_scope_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_event_scope_flags_t> for _ze_event_scope_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_event_scope_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_event_scope_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_event_scope_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported event scope flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_event_scope_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported event scope flags"] -pub use self::_ze_event_scope_flags_t as ze_event_scope_flags_t; -#[doc = ""] -#[doc = " @brief Event descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] index of the event within the pool; must be less-than the count"] - #[doc = "< specified during pool creation"] - pub index: u32, - #[doc = "< [in] defines the scope of relevant cache hierarchies to flush on a"] - #[doc = "< signal action before the event is triggered."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_event_scope_flags_t;"] - #[doc = "< default behavior is synchronization within the command list only, no"] - #[doc = "< additional cache hierarchies are flushed."] - pub signal: ze_event_scope_flags_t, - #[doc = "< [in] defines the scope of relevant cache 
hierarchies to invalidate on"] - #[doc = "< a wait action after the event is complete."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_event_scope_flags_t;"] - #[doc = "< default behavior is synchronization within the command list only, no"] - #[doc = "< additional cache hierarchies are invalidated."] - pub wait: ze_event_scope_flags_t, -} -#[test] -fn bindgen_test_layout__ze_event_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_desc_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_event_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_desc_t>())).index as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_desc_t), - "::", - stringify!(index) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_desc_t>())).signal as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_desc_t), - "::", - stringify!(signal) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_desc_t>())).wait as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_desc_t), - "::", - stringify!(wait) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates an event from the pool."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - An event is used to communicate fine-grain host-to-device,"] - #[doc = " device-to-host or device-to-device dependencies have completed."] - #[doc = " - The application must ensure the location in the pool is not being used"] - #[doc = " by another event."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same event pool handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clCreateUserEvent**"] - #[doc = " - vkCreateEvent"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEventPool`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x7 < desc->signal`"] - #[doc = " + `0x7 < desc->wait`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - pub fn zeEventCreate( - hEventPool: ze_event_pool_handle_t, - desc: *const ze_event_desc_t, - phEvent: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Deletes an event object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the event before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations 
associated with this event."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same event handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clReleaseEvent**"] - #[doc = " - vkDestroyEvent"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeEventDestroy(hEvent: ze_event_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Gets an IPC event pool handle for the specified event handle that can"] - #[doc = " be shared with another process."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Event pool must have been created with ::ZE_EVENT_POOL_FLAG_IPC."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEventPool`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == phIpc`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeEventPoolGetIpcHandle( - hEventPool: ze_event_pool_handle_t, - phIpc: *mut ze_ipc_event_pool_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Opens an IPC event pool handle to retrieve an event pool handle from"] - #[doc = " another process."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Multiple calls to this function with the same IPC handle will return"] - #[doc = " unique event pool handles."] - #[doc = " - The event handle in this process should not be freed with"] - #[doc = " ::zeEventPoolDestroy, but rather with ::zeEventPoolCloseIpcHandle."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == phEventPool`"] - pub fn zeEventPoolOpenIpcHandle( - hContext: ze_context_handle_t, - hIpc: ze_ipc_event_pool_handle_t, - phEventPool: *mut ze_event_pool_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Closes an IPC event handle in the current process."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Closes an IPC event handle by destroying events that were opened in"] - #[doc = " this process using ::zeEventPoolOpenIpcHandle."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same event pool handle."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEventPool`"] - pub fn zeEventPoolCloseIpcHandle(hEventPool: ze_event_pool_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " 
@brief Appends a signal of the event from the device into a command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The duration of an event created from an event pool that was created"] - #[doc = " using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag is undefined."] - #[doc = " However, for consistency and orthogonality the event will report"] - #[doc = " correctly as signaled when used by other event API functionality."] - #[doc = " - The application must ensure the command list and events were created"] - #[doc = " on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clSetUserEventStatus**"] - #[doc = " - vkCmdSetEvent"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeCommandListAppendSignalEvent( - hCommandList: ze_command_list_handle_t, - hEvent: ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Appends wait on event(s) on the device into a command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must ensure the command list and events were created"] - #[doc = " on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == phEvents`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeCommandListAppendWaitOnEvents( - hCommandList: ze_command_list_handle_t, - numEvents: u32, - phEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Signals a event from host."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The duration of an event created from an event pool that was created"] - #[doc = " using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag is undefined."] - #[doc = " However, for consistency and orthogonality the event will report"] - #[doc = " correctly as signaled when used by other event API functionality."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clSetUserEventStatus"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " 
- ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeEventHostSignal(hEvent: ze_event_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief The current host thread waits on an event to be signaled."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clWaitForEvents"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_NOT_READY"] - #[doc = " + timeout expired"] - pub fn zeEventHostSynchronize(hEvent: ze_event_handle_t, timeout: u64) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Queries an event object's status on the host."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **clGetEventInfo**"] - #[doc = " - vkGetEventStatus"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_NOT_READY"] - #[doc = " + not signaled"] - pub fn zeEventQueryStatus(hEvent: ze_event_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Appends a reset of an event back to not signaled state into a command"] - #[doc = " list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must ensure the command list and events were created"] - #[doc = " on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - vkResetEvent"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeCommandListAppendEventReset( - hCommandList: ze_command_list_handle_t, - hEvent: ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief The current host thread resets an event back to not signaled state."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be 
lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - vkResetEvent"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - pub fn zeEventHostReset(hEvent: ze_event_handle_t) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Kernel timestamp clock data"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - The timestamp frequency can be queried from"] -#[doc = " ::ze_device_properties_t.timerResolution."] -#[doc = " - The number of valid bits in the timestamp value can be queried from"] -#[doc = " ::ze_device_properties_t.kernelTimestampValidBits."] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_kernel_timestamp_data_t { - #[doc = "< [out] device clock at start of kernel execution"] - pub kernelStart: u64, - #[doc = "< [out] device clock at end of kernel execution"] - pub kernelEnd: u64, -} -#[test] -fn bindgen_test_layout__ze_kernel_timestamp_data_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_timestamp_data_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_kernel_timestamp_data_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_timestamp_data_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_timestamp_data_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_timestamp_data_t>())).kernelStart as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_timestamp_data_t), - "::", - stringify!(kernelStart) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_timestamp_data_t>())).kernelEnd as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_timestamp_data_t), - "::", - stringify!(kernelEnd) - ) - ); -} -#[doc = ""] -#[doc = " @brief Kernel timestamp result"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_kernel_timestamp_result_t { - #[doc = "< [out] wall-clock data"] - pub global: ze_kernel_timestamp_data_t, - #[doc = "< [out] context-active data; only includes clocks while device context"] - #[doc = "< was actively executing."] - pub context: ze_kernel_timestamp_data_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_timestamp_result_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_timestamp_result_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_kernel_timestamp_result_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_timestamp_result_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_timestamp_result_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_timestamp_result_t>())).global as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_timestamp_result_t), - "::", - stringify!(global) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_timestamp_result_t>())).context as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_timestamp_result_t), - "::", - stringify!(context) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Queries an event's timestamp value on the host."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the event was created from an event pool"] - #[doc = " that was created using 
::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag."] - #[doc = " - The destination memory will be unmodified if the event has not been"] - #[doc = " signaled."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hEvent`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_NOT_READY"] - #[doc = " + not signaled"] - pub fn zeEventQueryKernelTimestamp( - hEvent: ze_event_handle_t, - dstptr: *mut ze_kernel_timestamp_result_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Appends a query of an events' timestamp value(s) into a command list."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the events are accessible by the device on"] - #[doc = " which the command list was created."] - #[doc = " - The application must ensure the events were created from an event pool"] - #[doc = " that was created using ::ZE_EVENT_POOL_FLAG_KERNEL_TIMESTAMP flag."] - #[doc = " - The application must ensure the memory pointed to by both dstptr and"] - #[doc = " pOffsets is accessible by the device on which the command list was"] - #[doc = " created."] - #[doc = " - The value(s) written to the destination buffer are undefined if any"] - #[doc = " timestamp event has not been signaled."] - #[doc = " - If pOffsets is nullptr, then multiple results will be appended"] - #[doc = " sequentially into memory in the same order as phEvents."] - #[doc = " - The application must ensure the command list and events were created,"] - #[doc = " and the memory was allocated, on the same context."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == phEvents`"] - #[doc = " + `nullptr == dstptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendQueryKernelTimestamps( - hCommandList: ze_command_list_handle_t, - numEvents: u32, - phEvents: *mut ze_event_handle_t, - dstptr: *mut ::std::os::raw::c_void, - pOffsets: *const usize, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -impl _ze_fence_flags_t { - #[doc = "< fence is created in the signaled state, otherwise not signaled."] - pub const ZE_FENCE_FLAG_SIGNALED: _ze_fence_flags_t = _ze_fence_flags_t(1); -} -impl _ze_fence_flags_t { - pub const ZE_FENCE_FLAG_FORCE_UINT32: _ze_fence_flags_t = _ze_fence_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_fence_flags_t> for _ze_fence_flags_t { - type Output = Self; - #[inline] - fn bitor(self, 
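// Usage sketch for the event bindings above: create a one-slot event pool, create an event
// in it, signal and wait from the host, then tear down. This is not part of the generated
// file; it only shows how the declarations above might be driven. `ctx`/`dev` are assumed to
// be valid handles obtained elsewhere, and the ZE_STRUCTURE_TYPE_* / ZE_RESULT_SUCCESS
// constants are assumed to be defined in other parts of these bindings.
unsafe fn signal_one_event(ctx: ze_context_handle_t, mut dev: ze_device_handle_t) -> ze_result_t {
    let pool_desc = ze_event_pool_desc_t {
        stype: ze_structure_type_t::ZE_STRUCTURE_TYPE_EVENT_POOL_DESC, // assumed constant
        pNext: std::ptr::null(),
        flags: ze_event_pool_flags_t(0), // default visibility
        count: 1,                        // one event slot in the pool
    };
    let mut pool: ze_event_pool_handle_t = std::ptr::null_mut();
    let res = zeEventPoolCreate(ctx, &pool_desc, 1, &mut dev, &mut pool);
    if res != ze_result_t::ZE_RESULT_SUCCESS {
        return res;
    }
    let event_desc = ze_event_desc_t {
        stype: ze_structure_type_t::ZE_STRUCTURE_TYPE_EVENT_DESC, // assumed constant
        pNext: std::ptr::null(),
        index: 0, // must be less than pool_desc.count
        signal: ze_event_scope_flags_t::ZE_EVENT_SCOPE_FLAG_HOST,
        wait: ze_event_scope_flags_t::ZE_EVENT_SCOPE_FLAG_HOST,
    };
    let mut event: ze_event_handle_t = std::ptr::null_mut();
    let res = zeEventCreate(pool, &event_desc, &mut event);
    if res != ze_result_t::ZE_RESULT_SUCCESS {
        zeEventPoolDestroy(pool);
        return res;
    }
    zeEventHostSignal(event);                // signal from the host...
    zeEventHostSynchronize(event, u64::MAX); // ...and wait for it (UINT64_MAX = infinite wait)
    let _ = zeEventQueryStatus(event);       // now reports the signaled state
    zeEventDestroy(event);                   // destroy events before destroying their pool
    zeEventPoolDestroy(pool)
}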
other: Self) -> Self { - _ze_fence_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_fence_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_fence_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_fence_flags_t> for _ze_fence_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_fence_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_fence_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_fence_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported fence creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_fence_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported fence creation flags"] -pub use self::_ze_fence_flags_t as ze_fence_flags_t; -#[doc = ""] -#[doc = " @brief Fence descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] creation flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_fence_flags_t."] - pub flags: ze_fence_flags_t, -} -#[test] -fn bindgen_test_layout__ze_fence_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_fence_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_fence_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_fence_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_fence_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_fence_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_desc_t), - "::", - stringify!(flags) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a fence for the command queue."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - A fence is a heavyweight synchronization primitive used to communicate"] - #[doc = " to the host that command list execution has completed."] - #[doc = " - The application must only use the fence for the command queue which"] - #[doc = " was provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkCreateFence**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandQueue`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phFence`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn 
zeFenceCreate( - hCommandQueue: ze_command_queue_handle_t, - desc: *const ze_fence_desc_t, - phFence: *mut ze_fence_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Deletes a fence object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the fence before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this fence."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same fence handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkDestroyFence**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hFence`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeFenceDestroy(hFence: ze_fence_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief The current host thread waits on a fence to be signaled."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkWaitForFences**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hFence`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_NOT_READY"] - #[doc = " + timeout expired"] - pub fn zeFenceHostSynchronize(hFence: ze_fence_handle_t, timeout: u64) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Queries a fence object's status."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkGetFenceStatus**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hFence`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_NOT_READY"] - #[doc = " + not signaled"] - pub fn zeFenceQueryStatus(hFence: ze_fence_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Reset a fence back to the not signaled state."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - **vkResetFences**"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = 
" + `nullptr == hFence`"] - pub fn zeFenceReset(hFence: ze_fence_handle_t) -> ze_result_t; -} -impl _ze_image_flags_t { - #[doc = "< kernels will write contents"] - pub const ZE_IMAGE_FLAG_KERNEL_WRITE: _ze_image_flags_t = _ze_image_flags_t(1); -} -impl _ze_image_flags_t { - #[doc = "< device should not cache contents"] - pub const ZE_IMAGE_FLAG_BIAS_UNCACHED: _ze_image_flags_t = _ze_image_flags_t(2); -} -impl _ze_image_flags_t { - pub const ZE_IMAGE_FLAG_FORCE_UINT32: _ze_image_flags_t = _ze_image_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_image_flags_t> for _ze_image_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_image_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_image_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_image_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_image_flags_t> for _ze_image_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_image_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_image_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_image_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported image creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_image_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported image creation flags"] -pub use self::_ze_image_flags_t as ze_image_flags_t; -impl _ze_image_type_t { - #[doc = "< 1D"] - pub const ZE_IMAGE_TYPE_1D: _ze_image_type_t = _ze_image_type_t(0); -} -impl _ze_image_type_t { - #[doc = "< 1D array"] - pub const ZE_IMAGE_TYPE_1DARRAY: _ze_image_type_t = _ze_image_type_t(1); -} -impl _ze_image_type_t { - #[doc = "< 2D"] - pub const ZE_IMAGE_TYPE_2D: _ze_image_type_t = _ze_image_type_t(2); -} -impl _ze_image_type_t { - #[doc = "< 2D array"] - pub const ZE_IMAGE_TYPE_2DARRAY: _ze_image_type_t = _ze_image_type_t(3); -} -impl _ze_image_type_t { - #[doc = "< 3D"] - pub const ZE_IMAGE_TYPE_3D: _ze_image_type_t = _ze_image_type_t(4); -} -impl _ze_image_type_t { - #[doc = "< Buffer"] - pub const ZE_IMAGE_TYPE_BUFFER: _ze_image_type_t = _ze_image_type_t(5); -} -impl _ze_image_type_t { - pub const ZE_IMAGE_TYPE_FORCE_UINT32: _ze_image_type_t = _ze_image_type_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported image types"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_image_type_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported image types"] -pub use self::_ze_image_type_t as ze_image_type_t; -impl _ze_image_format_layout_t { - #[doc = "< 8-bit single component layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_8: _ze_image_format_layout_t = _ze_image_format_layout_t(0); -} -impl _ze_image_format_layout_t { - #[doc = "< 16-bit single component layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_16: _ze_image_format_layout_t = _ze_image_format_layout_t(1); -} -impl _ze_image_format_layout_t { - #[doc = "< 32-bit single component layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_32: _ze_image_format_layout_t = _ze_image_format_layout_t(2); -} -impl _ze_image_format_layout_t { - #[doc = "< 2-component 8-bit layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_8_8: _ze_image_format_layout_t = _ze_image_format_layout_t(3); -} -impl _ze_image_format_layout_t { - #[doc = "< 4-component 8-bit layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_8_8_8_8: _ze_image_format_layout_t = - _ze_image_format_layout_t(4); -} -impl 
_ze_image_format_layout_t { - #[doc = "< 2-component 16-bit layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_16_16: _ze_image_format_layout_t = - _ze_image_format_layout_t(5); -} -impl _ze_image_format_layout_t { - #[doc = "< 4-component 16-bit layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_16_16_16_16: _ze_image_format_layout_t = - _ze_image_format_layout_t(6); -} -impl _ze_image_format_layout_t { - #[doc = "< 2-component 32-bit layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_32_32: _ze_image_format_layout_t = - _ze_image_format_layout_t(7); -} -impl _ze_image_format_layout_t { - #[doc = "< 4-component 32-bit layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_32_32_32_32: _ze_image_format_layout_t = - _ze_image_format_layout_t(8); -} -impl _ze_image_format_layout_t { - #[doc = "< 4-component 10_10_10_2 layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_10_10_10_2: _ze_image_format_layout_t = - _ze_image_format_layout_t(9); -} -impl _ze_image_format_layout_t { - #[doc = "< 3-component 11_11_10 layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_11_11_10: _ze_image_format_layout_t = - _ze_image_format_layout_t(10); -} -impl _ze_image_format_layout_t { - #[doc = "< 3-component 5_6_5 layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_5_6_5: _ze_image_format_layout_t = - _ze_image_format_layout_t(11); -} -impl _ze_image_format_layout_t { - #[doc = "< 4-component 5_5_5_1 layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_5_5_5_1: _ze_image_format_layout_t = - _ze_image_format_layout_t(12); -} -impl _ze_image_format_layout_t { - #[doc = "< 4-component 4_4_4_4 layout"] - pub const ZE_IMAGE_FORMAT_LAYOUT_4_4_4_4: _ze_image_format_layout_t = - _ze_image_format_layout_t(13); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: Y8. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_Y8: _ze_image_format_layout_t = _ze_image_format_layout_t(14); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: NV12. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_NV12: _ze_image_format_layout_t = - _ze_image_format_layout_t(15); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: YUYV. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_YUYV: _ze_image_format_layout_t = - _ze_image_format_layout_t(16); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: VYUY. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_VYUY: _ze_image_format_layout_t = - _ze_image_format_layout_t(17); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: YVYU. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_YVYU: _ze_image_format_layout_t = - _ze_image_format_layout_t(18); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: UYVY. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_UYVY: _ze_image_format_layout_t = - _ze_image_format_layout_t(19); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: AYUV. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_AYUV: _ze_image_format_layout_t = - _ze_image_format_layout_t(20); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: P010. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_P010: _ze_image_format_layout_t = - _ze_image_format_layout_t(21); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: Y410. 
Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_Y410: _ze_image_format_layout_t = - _ze_image_format_layout_t(22); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: P012. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_P012: _ze_image_format_layout_t = - _ze_image_format_layout_t(23); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: Y16. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_Y16: _ze_image_format_layout_t = _ze_image_format_layout_t(24); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: P016. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_P016: _ze_image_format_layout_t = - _ze_image_format_layout_t(25); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: Y216. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_Y216: _ze_image_format_layout_t = - _ze_image_format_layout_t(26); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: P216. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_P216: _ze_image_format_layout_t = - _ze_image_format_layout_t(27); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: P8. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_P8: _ze_image_format_layout_t = _ze_image_format_layout_t(28); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: YUY2. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_YUY2: _ze_image_format_layout_t = - _ze_image_format_layout_t(29); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: A8P8. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_A8P8: _ze_image_format_layout_t = - _ze_image_format_layout_t(30); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: IA44. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_IA44: _ze_image_format_layout_t = - _ze_image_format_layout_t(31); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: AI44. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_AI44: _ze_image_format_layout_t = - _ze_image_format_layout_t(32); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: Y416. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_Y416: _ze_image_format_layout_t = - _ze_image_format_layout_t(33); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: Y210. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_Y210: _ze_image_format_layout_t = - _ze_image_format_layout_t(34); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: I420. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_I420: _ze_image_format_layout_t = - _ze_image_format_layout_t(35); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: YV12. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_YV12: _ze_image_format_layout_t = - _ze_image_format_layout_t(36); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: 400P. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_400P: _ze_image_format_layout_t = - _ze_image_format_layout_t(37); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: 422H. 
Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_422H: _ze_image_format_layout_t = - _ze_image_format_layout_t(38); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: 422V. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_422V: _ze_image_format_layout_t = - _ze_image_format_layout_t(39); -} -impl _ze_image_format_layout_t { - #[doc = "< Media Format: 444P. Format type and swizzle is ignored for this."] - pub const ZE_IMAGE_FORMAT_LAYOUT_444P: _ze_image_format_layout_t = - _ze_image_format_layout_t(40); -} -impl _ze_image_format_layout_t { - pub const ZE_IMAGE_FORMAT_LAYOUT_FORCE_UINT32: _ze_image_format_layout_t = - _ze_image_format_layout_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported image format layouts"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_image_format_layout_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported image format layouts"] -pub use self::_ze_image_format_layout_t as ze_image_format_layout_t; -impl _ze_image_format_type_t { - #[doc = "< Unsigned integer"] - pub const ZE_IMAGE_FORMAT_TYPE_UINT: _ze_image_format_type_t = _ze_image_format_type_t(0); -} -impl _ze_image_format_type_t { - #[doc = "< Signed integer"] - pub const ZE_IMAGE_FORMAT_TYPE_SINT: _ze_image_format_type_t = _ze_image_format_type_t(1); -} -impl _ze_image_format_type_t { - #[doc = "< Unsigned normalized integer"] - pub const ZE_IMAGE_FORMAT_TYPE_UNORM: _ze_image_format_type_t = _ze_image_format_type_t(2); -} -impl _ze_image_format_type_t { - #[doc = "< Signed normalized integer"] - pub const ZE_IMAGE_FORMAT_TYPE_SNORM: _ze_image_format_type_t = _ze_image_format_type_t(3); -} -impl _ze_image_format_type_t { - #[doc = "< Float"] - pub const ZE_IMAGE_FORMAT_TYPE_FLOAT: _ze_image_format_type_t = _ze_image_format_type_t(4); -} -impl _ze_image_format_type_t { - pub const ZE_IMAGE_FORMAT_TYPE_FORCE_UINT32: _ze_image_format_type_t = - _ze_image_format_type_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported image format types"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_image_format_type_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported image format types"] -pub use self::_ze_image_format_type_t as ze_image_format_type_t; -impl _ze_image_format_swizzle_t { - #[doc = "< Red component"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_R: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(0); -} -impl _ze_image_format_swizzle_t { - #[doc = "< Green component"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_G: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(1); -} -impl _ze_image_format_swizzle_t { - #[doc = "< Blue component"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_B: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(2); -} -impl _ze_image_format_swizzle_t { - #[doc = "< Alpha component"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_A: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(3); -} -impl _ze_image_format_swizzle_t { - #[doc = "< Zero"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_0: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(4); -} -impl _ze_image_format_swizzle_t { - #[doc = "< One"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_1: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(5); -} -impl _ze_image_format_swizzle_t { - #[doc = "< Don't care"] - pub const ZE_IMAGE_FORMAT_SWIZZLE_X: _ze_image_format_swizzle_t = _ze_image_format_swizzle_t(6); -} -impl _ze_image_format_swizzle_t { - 
pub const ZE_IMAGE_FORMAT_SWIZZLE_FORCE_UINT32: _ze_image_format_swizzle_t = - _ze_image_format_swizzle_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported image format component swizzle into channel"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_image_format_swizzle_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported image format component swizzle into channel"] -pub use self::_ze_image_format_swizzle_t as ze_image_format_swizzle_t; -#[doc = ""] -#[doc = " @brief Image format"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_format_t { - #[doc = "< [in] image format component layout"] - pub layout: ze_image_format_layout_t, - #[doc = "< [in] image format type. Media formats can't be used for"] - #[doc = "< ::ZE_IMAGE_TYPE_BUFFER."] - pub type_: ze_image_format_type_t, - #[doc = "< [in] image component swizzle into channel x"] - pub x: ze_image_format_swizzle_t, - #[doc = "< [in] image component swizzle into channel y"] - pub y: ze_image_format_swizzle_t, - #[doc = "< [in] image component swizzle into channel z"] - pub z: ze_image_format_swizzle_t, - #[doc = "< [in] image component swizzle into channel w"] - pub w: ze_image_format_swizzle_t, -} -#[test] -fn bindgen_test_layout__ze_image_format_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_format_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_image_format_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_format_t>(), - 4usize, - concat!("Alignment of ", stringify!(_ze_image_format_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_format_t>())).layout as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_format_t), - "::", - stringify!(layout) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_format_t>())).type_ as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_format_t), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_format_t>())).x as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_format_t), - "::", - stringify!(x) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_format_t>())).y as *const _ as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_format_t), - "::", - stringify!(y) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_format_t>())).z as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_format_t), - "::", - stringify!(z) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_format_t>())).w as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_format_t), - "::", - stringify!(w) - ) - ); -} -#[doc = ""] -#[doc = " @brief Image descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] creation flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_image_flags_t;"] - #[doc = "< default is read-only, cached access."] - pub flags: ze_image_flags_t, - #[doc = "< [in] image type"] - pub type_: ze_image_type_t, - #[doc = "< [in] image format"] - pub format: ze_image_format_t, - #[doc = "< [in] width dimension."] - #[doc = "< 
::ZE_IMAGE_TYPE_BUFFER: size in bytes; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageBufferSize for limits."] - #[doc = "< ::ZE_IMAGE_TYPE_1D, ::ZE_IMAGE_TYPE_1DARRAY: width in pixels; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageDims1D for limits."] - #[doc = "< ::ZE_IMAGE_TYPE_2D, ::ZE_IMAGE_TYPE_2DARRAY: width in pixels; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageDims2D for limits."] - #[doc = "< ::ZE_IMAGE_TYPE_3D: width in pixels; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageDims3D for limits."] - pub width: u64, - #[doc = "< [in] height dimension."] - #[doc = "< ::ZE_IMAGE_TYPE_2D, ::ZE_IMAGE_TYPE_2DARRAY: height in pixels; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageDims2D for limits."] - #[doc = "< ::ZE_IMAGE_TYPE_3D: height in pixels; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageDims3D for limits."] - #[doc = "< other: ignored."] - pub height: u32, - #[doc = "< [in] depth dimension."] - #[doc = "< ::ZE_IMAGE_TYPE_3D: depth in pixels; see"] - #[doc = "< ::ze_device_image_properties_t.maxImageDims3D for limits."] - #[doc = "< other: ignored."] - pub depth: u32, - #[doc = "< [in] array levels."] - #[doc = "< ::ZE_IMAGE_TYPE_1DARRAY, ::ZE_IMAGE_TYPE_2DARRAY: see"] - #[doc = "< ::ze_device_image_properties_t.maxImageArraySlices for limits."] - #[doc = "< other: ignored."] - pub arraylevels: u32, - #[doc = "< [in] mipmap levels (must be 0)"] - pub miplevels: u32, -} -#[test] -fn bindgen_test_layout__ze_image_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_desc_t>(), - 72usize, - concat!("Size of: ", stringify!(_ze_image_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_image_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).type_ as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).format as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(format) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).width as *const _ as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(width) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).height as *const _ as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(height) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).depth as *const _ as usize }, - 60usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(depth) - ) - ); - assert_eq!( - unsafe { 
&(*(::std::ptr::null::<_ze_image_desc_t>())).arraylevels as *const _ as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(arraylevels) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_desc_t>())).miplevels as *const _ as usize }, - 68usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_desc_t), - "::", - stringify!(miplevels) - ) - ); -} -impl _ze_image_sampler_filter_flags_t { - #[doc = "< device supports point filtering"] - pub const ZE_IMAGE_SAMPLER_FILTER_FLAG_POINT: _ze_image_sampler_filter_flags_t = - _ze_image_sampler_filter_flags_t(1); -} -impl _ze_image_sampler_filter_flags_t { - #[doc = "< device supports linear filtering"] - pub const ZE_IMAGE_SAMPLER_FILTER_FLAG_LINEAR: _ze_image_sampler_filter_flags_t = - _ze_image_sampler_filter_flags_t(2); -} -impl _ze_image_sampler_filter_flags_t { - pub const ZE_IMAGE_SAMPLER_FILTER_FLAG_FORCE_UINT32: _ze_image_sampler_filter_flags_t = - _ze_image_sampler_filter_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_image_sampler_filter_flags_t> for _ze_image_sampler_filter_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_image_sampler_filter_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_image_sampler_filter_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_image_sampler_filter_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_image_sampler_filter_flags_t> for _ze_image_sampler_filter_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_image_sampler_filter_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_image_sampler_filter_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_image_sampler_filter_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported sampler filtering flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_image_sampler_filter_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported sampler filtering flags"] -pub use self::_ze_image_sampler_filter_flags_t as ze_image_sampler_filter_flags_t; -#[doc = ""] -#[doc = " @brief Image properties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] supported sampler filtering."] - #[doc = "< returns 0 (unsupported) or a combination of ::ze_image_sampler_filter_flags_t."] - pub samplerFilterFlags: ze_image_sampler_filter_flags_t, -} -#[test] -fn bindgen_test_layout__ze_image_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_properties_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_image_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_image_properties_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_properties_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_properties_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_properties_t), - "::", - stringify!(pNext) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_properties_t>())).samplerFilterFlags as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_properties_t), - "::", - stringify!(samplerFilterFlags) - ) - ); -} -impl Default for _ze_image_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves supported properties of an image."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == pImageProperties`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x3 < desc->flags`"] - #[doc = " + `::ZE_IMAGE_TYPE_BUFFER < desc->type`"] - pub fn zeImageGetProperties( - hDevice: ze_device_handle_t, - desc: *const ze_image_desc_t, - pImageProperties: *mut ze_image_properties_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates an image on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use the image for the device, or its"] - #[doc = " sub-devices, which was provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @remarks"] - #[doc = " _Analogues_"] - #[doc = " - clCreateImage"] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phImage`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x3 < desc->flags`"] - #[doc = " + `::ZE_IMAGE_TYPE_BUFFER < desc->type`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_IMAGE_FORMAT"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeImageCreate( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - desc: *const ze_image_desc_t, - phImage: *mut ze_image_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Deletes an image object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the image before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this image."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same image handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - 
::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hImage`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeImageDestroy(hImage: ze_image_handle_t) -> ze_result_t; -} -impl _ze_device_mem_alloc_flags_t { - #[doc = "< device should cache allocation"] - pub const ZE_DEVICE_MEM_ALLOC_FLAG_BIAS_CACHED: _ze_device_mem_alloc_flags_t = - _ze_device_mem_alloc_flags_t(1); -} -impl _ze_device_mem_alloc_flags_t { - #[doc = "< device should not cache allocation (UC)"] - pub const ZE_DEVICE_MEM_ALLOC_FLAG_BIAS_UNCACHED: _ze_device_mem_alloc_flags_t = - _ze_device_mem_alloc_flags_t(2); -} -impl _ze_device_mem_alloc_flags_t { - pub const ZE_DEVICE_MEM_ALLOC_FLAG_FORCE_UINT32: _ze_device_mem_alloc_flags_t = - _ze_device_mem_alloc_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_mem_alloc_flags_t> for _ze_device_mem_alloc_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_mem_alloc_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_mem_alloc_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_mem_alloc_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_mem_alloc_flags_t> for _ze_device_mem_alloc_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_mem_alloc_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_mem_alloc_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_mem_alloc_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported memory allocation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_mem_alloc_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported memory allocation flags"] -pub use self::_ze_device_mem_alloc_flags_t as ze_device_mem_alloc_flags_t; -#[doc = ""] -#[doc = " @brief Device memory allocation descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_mem_alloc_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying additional allocation controls."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_device_mem_alloc_flags_t;"] - #[doc = "< default behavior may use implicit driver-based heuristics."] - pub flags: ze_device_mem_alloc_flags_t, - #[doc = "< [in] ordinal of the device's local memory to allocate from."] - #[doc = "< must be less than the count returned from ::zeDeviceGetMemoryProperties."] - pub ordinal: u32, -} -#[test] -fn bindgen_test_layout__ze_device_mem_alloc_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_mem_alloc_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_device_mem_alloc_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_mem_alloc_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_mem_alloc_desc_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_mem_alloc_desc_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_mem_alloc_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_mem_alloc_desc_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_mem_alloc_desc_t), - "::", - 
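// Usage sketch for the image bindings above: a 2D RGBA8 UNORM image created on a device and
// destroyed again. Not part of the generated file; ZE_STRUCTURE_TYPE_IMAGE_DESC and
// ZE_RESULT_SUCCESS are assumed to be defined elsewhere in these bindings, and `ctx`/`dev`
// are assumed to be valid handles.
unsafe fn create_rgba8_image(ctx: ze_context_handle_t, dev: ze_device_handle_t) -> ze_result_t {
    let desc = ze_image_desc_t {
        stype: ze_structure_type_t::ZE_STRUCTURE_TYPE_IMAGE_DESC, // assumed constant
        pNext: std::ptr::null(),
        flags: ze_image_flags_t::ZE_IMAGE_FLAG_KERNEL_WRITE, // kernels may write to it
        type_: ze_image_type_t::ZE_IMAGE_TYPE_2D,
        format: ze_image_format_t {
            layout: ze_image_format_layout_t::ZE_IMAGE_FORMAT_LAYOUT_8_8_8_8,
            type_: ze_image_format_type_t::ZE_IMAGE_FORMAT_TYPE_UNORM,
            x: ze_image_format_swizzle_t::ZE_IMAGE_FORMAT_SWIZZLE_R,
            y: ze_image_format_swizzle_t::ZE_IMAGE_FORMAT_SWIZZLE_G,
            z: ze_image_format_swizzle_t::ZE_IMAGE_FORMAT_SWIZZLE_B,
            w: ze_image_format_swizzle_t::ZE_IMAGE_FORMAT_SWIZZLE_A,
        },
        width: 1024,
        height: 1024,
        depth: 0,       // ignored for 2D images
        arraylevels: 0, // ignored for non-array images
        miplevels: 0,   // must be 0
    };
    let mut image: ze_image_handle_t = std::ptr::null_mut();
    let res = zeImageCreate(ctx, dev, &desc, &mut image);
    if res == ze_result_t::ZE_RESULT_SUCCESS {
        zeImageDestroy(image);
    }
    res
}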
stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_mem_alloc_desc_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_mem_alloc_desc_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_mem_alloc_desc_t>())).ordinal as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_mem_alloc_desc_t), - "::", - stringify!(ordinal) - ) - ); -} -impl _ze_host_mem_alloc_flags_t { - #[doc = "< host should cache allocation"] - pub const ZE_HOST_MEM_ALLOC_FLAG_BIAS_CACHED: _ze_host_mem_alloc_flags_t = - _ze_host_mem_alloc_flags_t(1); -} -impl _ze_host_mem_alloc_flags_t { - #[doc = "< host should not cache allocation (UC)"] - pub const ZE_HOST_MEM_ALLOC_FLAG_BIAS_UNCACHED: _ze_host_mem_alloc_flags_t = - _ze_host_mem_alloc_flags_t(2); -} -impl _ze_host_mem_alloc_flags_t { - #[doc = "< host memory should be allocated write-combined (WC)"] - pub const ZE_HOST_MEM_ALLOC_FLAG_BIAS_WRITE_COMBINED: _ze_host_mem_alloc_flags_t = - _ze_host_mem_alloc_flags_t(4); -} -impl _ze_host_mem_alloc_flags_t { - pub const ZE_HOST_MEM_ALLOC_FLAG_FORCE_UINT32: _ze_host_mem_alloc_flags_t = - _ze_host_mem_alloc_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_host_mem_alloc_flags_t> for _ze_host_mem_alloc_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_host_mem_alloc_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_host_mem_alloc_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_host_mem_alloc_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_host_mem_alloc_flags_t> for _ze_host_mem_alloc_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_host_mem_alloc_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_host_mem_alloc_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_host_mem_alloc_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported host memory allocation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_host_mem_alloc_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported host memory allocation flags"] -pub use self::_ze_host_mem_alloc_flags_t as ze_host_mem_alloc_flags_t; -#[doc = ""] -#[doc = " @brief Host memory allocation descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_host_mem_alloc_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying additional allocation controls."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_host_mem_alloc_flags_t;"] - #[doc = "< default behavior may use implicit driver-based heuristics."] - pub flags: ze_host_mem_alloc_flags_t, -} -#[test] -fn bindgen_test_layout__ze_host_mem_alloc_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_host_mem_alloc_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_host_mem_alloc_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_host_mem_alloc_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_host_mem_alloc_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_host_mem_alloc_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_host_mem_alloc_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_host_mem_alloc_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_host_mem_alloc_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_host_mem_alloc_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_host_mem_alloc_desc_t), - "::", - stringify!(flags) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Allocates shared memory on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Shared allocations share ownership between the host and one or more"] - #[doc = " devices."] - #[doc = " - Shared allocations may optionally be associated with a device by"] - #[doc = " passing a handle to the device."] - #[doc = " - Devices supporting only single-device shared access capabilities may"] - #[doc = " access shared memory associated with the device."] - #[doc = " For these devices, ownership of the allocation is shared between the"] - #[doc = " host and the associated device only."] - #[doc = " - Passing nullptr as the device handle does not associate the shared"] - #[doc = " allocation with any device."] - #[doc = " For allocations with no associated device, ownership of the allocation"] - #[doc = " is shared between the host and all devices supporting cross-device"] - #[doc = " shared access capabilities."] - #[doc = " - The application must only use the memory allocation for the context"] - #[doc = " and device, or its sub-devices, which was provided during allocation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == device_desc`"] - #[doc = " + `nullptr == host_desc`"] - #[doc = " + `nullptr == pptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x3 < device_desc->flags`"] - #[doc = " + `0x7 < host_desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT"] - #[doc = " + Must be zero or a power-of-two"] - #[doc = " + `0 != (alignment & (alignment - 1))`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeMemAllocShared( - hContext: ze_context_handle_t, - device_desc: *const ze_device_mem_alloc_desc_t, - host_desc: *const ze_host_mem_alloc_desc_t, - size: usize, - alignment: usize, - hDevice: ze_device_handle_t, - pptr: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Allocates device memory on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Device allocations are owned by a specific device."] - #[doc = " - In general, a device allocation may only be accessed by the device"] - #[doc = " that owns it."] - #[doc = " - The application must only use the memory allocation for the context"] - #[doc = " and device, or its sub-devices, which was provided during allocation."] - #[doc = " - The application may call this 
function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == device_desc`"] - #[doc = " + `nullptr == pptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x3 < device_desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT"] - #[doc = " + Must be zero or a power-of-two"] - #[doc = " + `0 != (alignment & (alignment - 1))`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeMemAllocDevice( - hContext: ze_context_handle_t, - device_desc: *const ze_device_mem_alloc_desc_t, - size: usize, - alignment: usize, - hDevice: ze_device_handle_t, - pptr: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Allocates host memory on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Host allocations are owned by the host process."] - #[doc = " - Host allocations are accessible by the host and all devices within the"] - #[doc = " driver's context."] - #[doc = " - Host allocations are frequently used as staging areas to transfer data"] - #[doc = " to or from devices."] - #[doc = " - The application must only use the memory allocation for the context"] - #[doc = " which was provided during allocation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == host_desc`"] - #[doc = " + `nullptr == pptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x7 < host_desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT"] - #[doc = " + Must be zero or a power-of-two"] - #[doc = " + `0 != (alignment & (alignment - 1))`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeMemAllocHost( - hContext: ze_context_handle_t, - host_desc: *const ze_host_mem_alloc_desc_t, - size: usize, - alignment: usize, - pptr: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Frees allocated host memory, device memory, or shared memory on the"] - #[doc = " context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the memory before it is freed"] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this memory"] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same pointer."] - #[doc = 
" - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - pub fn zeMemFree( - hContext: ze_context_handle_t, - ptr: *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -impl _ze_memory_type_t { - #[doc = "< the memory pointed to is of unknown type"] - pub const ZE_MEMORY_TYPE_UNKNOWN: _ze_memory_type_t = _ze_memory_type_t(0); -} -impl _ze_memory_type_t { - #[doc = "< the memory pointed to is a host allocation"] - pub const ZE_MEMORY_TYPE_HOST: _ze_memory_type_t = _ze_memory_type_t(1); -} -impl _ze_memory_type_t { - #[doc = "< the memory pointed to is a device allocation"] - pub const ZE_MEMORY_TYPE_DEVICE: _ze_memory_type_t = _ze_memory_type_t(2); -} -impl _ze_memory_type_t { - #[doc = "< the memory pointed to is a shared ownership allocation"] - pub const ZE_MEMORY_TYPE_SHARED: _ze_memory_type_t = _ze_memory_type_t(3); -} -impl _ze_memory_type_t { - pub const ZE_MEMORY_TYPE_FORCE_UINT32: _ze_memory_type_t = _ze_memory_type_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Memory allocation type"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_memory_type_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Memory allocation type"] -pub use self::_ze_memory_type_t as ze_memory_type_t; -#[doc = ""] -#[doc = " @brief Memory allocation properties queried using ::zeMemGetAllocProperties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_memory_allocation_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] type of allocated memory"] - pub type_: ze_memory_type_t, - #[doc = "< [out] identifier for this allocation"] - pub id: u64, - #[doc = "< [out] page size used for allocation"] - pub pageSize: u64, -} -#[test] -fn bindgen_test_layout__ze_memory_allocation_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_memory_allocation_properties_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_memory_allocation_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_memory_allocation_properties_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_memory_allocation_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_memory_allocation_properties_t>())).stype as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_memory_allocation_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_memory_allocation_properties_t>())).pNext as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_memory_allocation_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_memory_allocation_properties_t>())).type_ as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_memory_allocation_properties_t), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_memory_allocation_properties_t>())).id as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: 
", - stringify!(_ze_memory_allocation_properties_t), - "::", - stringify!(id) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_memory_allocation_properties_t>())).pageSize as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_memory_allocation_properties_t), - "::", - stringify!(pageSize) - ) - ); -} -impl Default for _ze_memory_allocation_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves attributes of a memory allocation"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The application may query attributes of a memory allocation unrelated"] - #[doc = " to the context."] - #[doc = " When this occurs, the returned allocation type will be"] - #[doc = " ::ZE_MEMORY_TYPE_UNKNOWN, and the returned identifier and associated"] - #[doc = " device is unspecified."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " + `nullptr == pMemAllocProperties`"] - pub fn zeMemGetAllocProperties( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - pMemAllocProperties: *mut ze_memory_allocation_properties_t, - phDevice: *mut ze_device_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves the base address and/or size of an allocation"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - pub fn zeMemGetAddressRange( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - pBase: *mut *mut ::std::os::raw::c_void, - pSize: *mut usize, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates an IPC memory handle for the specified allocation"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Takes a pointer to a device memory allocation and creates an IPC"] - #[doc = " memory handle for exporting it for use in another process."] - #[doc = " - The pointer must be base pointer of the device memory allocation; i.e."] - #[doc = " the value returned from ::zeMemAllocDevice."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " + `nullptr == pIpcHandle`"] - pub fn zeMemGetIpcHandle( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - pIpcHandle: *mut ze_ipc_mem_handle_t, - ) -> ze_result_t; -} -impl _ze_ipc_memory_flags_t { - 
#[doc = "< reserved for future use"] - pub const ZE_IPC_MEMORY_FLAG_TBD: _ze_ipc_memory_flags_t = _ze_ipc_memory_flags_t(1); -} -impl _ze_ipc_memory_flags_t { - pub const ZE_IPC_MEMORY_FLAG_FORCE_UINT32: _ze_ipc_memory_flags_t = - _ze_ipc_memory_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_ipc_memory_flags_t> for _ze_ipc_memory_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_ipc_memory_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_ipc_memory_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_ipc_memory_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_ipc_memory_flags_t> for _ze_ipc_memory_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_ipc_memory_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_ipc_memory_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_ipc_memory_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported IPC memory flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_ipc_memory_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported IPC memory flags"] -pub use self::_ze_ipc_memory_flags_t as ze_ipc_memory_flags_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Opens an IPC memory handle to retrieve a device pointer on the"] - #[doc = " context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Takes an IPC memory handle from a remote process and associates it"] - #[doc = " with a device pointer usable in this process."] - #[doc = " - The device pointer in this process should not be freed with"] - #[doc = " ::zeMemFree, but rather with ::zeMemCloseIpcHandle."] - #[doc = " - Multiple calls to this function with the same IPC handle will return"] - #[doc = " unique pointers."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < flags`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pptr`"] - pub fn zeMemOpenIpcHandle( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - handle: ze_ipc_mem_handle_t, - flags: ze_ipc_memory_flags_t, - pptr: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Closes an IPC memory handle"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Closes an IPC memory handle by unmapping memory that was opened in"] - #[doc = " this process using ::zeMemOpenIpcHandle."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same pointer."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - pub fn 
zeMemCloseIpcHandle( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Additional allocation descriptor for exporting external memory"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure may be passed to ::zeMemAllocDevice, via the `pNext`"] -#[doc = " member of ::ze_device_mem_alloc_desc_t, to indicate an exportable"] -#[doc = " memory allocation."] -#[doc = " - This structure may be passed to ::zeImageCreate, via the `pNext`"] -#[doc = " member of ::ze_image_desc_t, to indicate an exportable image."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_external_memory_export_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying memory export types for this allocation."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_external_memory_type_flags_t"] - pub flags: ze_external_memory_type_flags_t, -} -#[test] -fn bindgen_test_layout__ze_external_memory_export_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_external_memory_export_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_external_memory_export_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_external_memory_export_desc_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_external_memory_export_desc_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_desc_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_desc_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_desc_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_desc_t), - "::", - stringify!(flags) - ) - ); -} -#[doc = ""] -#[doc = " @brief Additional allocation descriptor for importing external memory as a"] -#[doc = " file descriptor"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure may be passed to ::zeMemAllocDevice, via the `pNext`"] -#[doc = " member of ::ze_device_mem_alloc_desc_t, to import memory from a file"] -#[doc = " descriptor."] -#[doc = " - This structure may be passed to ::zeImageCreate, via the `pNext`"] -#[doc = " member of ::ze_image_desc_t, to import memory from a file descriptor."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_external_memory_import_fd_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying the memory import type for the file descriptor."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_external_memory_type_flags_t"] - pub flags: ze_external_memory_type_flags_t, - #[doc = "< [in] the file descriptor handle to import"] - pub fd: ::std::os::raw::c_int, -} -#[test] -fn bindgen_test_layout__ze_external_memory_import_fd_t() { - assert_eq!( - ::std::mem::size_of::<_ze_external_memory_import_fd_t>(), - 24usize, - concat!("Size of: 
", stringify!(_ze_external_memory_import_fd_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_external_memory_import_fd_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_external_memory_import_fd_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_import_fd_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_import_fd_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_import_fd_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_import_fd_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_import_fd_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_import_fd_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_import_fd_t>())).fd as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_import_fd_t), - "::", - stringify!(fd) - ) - ); -} -#[doc = ""] -#[doc = " @brief Exports an allocation as a file descriptor"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure may be passed to ::zeMemGetAllocProperties, via the"] -#[doc = " `pNext` member of ::ze_memory_allocation_properties_t, to export a"] -#[doc = " memory allocation as a file descriptor."] -#[doc = " - This structure may be passed to ::zeImageGetProperties, via the"] -#[doc = " `pNext` member of ::ze_image_properties_t, to export an image as a"] -#[doc = " file descriptor."] -#[doc = " - The requested memory export type must have been specified when the"] -#[doc = " allocation was made."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_external_memory_export_fd_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying the memory export type for the file descriptor."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_external_memory_type_flags_t"] - pub flags: ze_external_memory_type_flags_t, - #[doc = "< [out] the exported file descriptor handle representing the allocation."] - pub fd: ::std::os::raw::c_int, -} -#[test] -fn bindgen_test_layout__ze_external_memory_export_fd_t() { - assert_eq!( - ::std::mem::size_of::<_ze_external_memory_export_fd_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_external_memory_export_fd_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_external_memory_export_fd_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_external_memory_export_fd_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_fd_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_fd_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_fd_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_fd_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_fd_t>())).flags as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_fd_t), 
- "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_external_memory_export_fd_t>())).fd as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_external_memory_export_fd_t), - "::", - stringify!(fd) - ) - ); -} -impl _ze_module_format_t { - #[doc = "< Format is SPIRV IL format"] - pub const ZE_MODULE_FORMAT_IL_SPIRV: _ze_module_format_t = _ze_module_format_t(0); -} -impl _ze_module_format_t { - #[doc = "< Format is device native format"] - pub const ZE_MODULE_FORMAT_NATIVE: _ze_module_format_t = _ze_module_format_t(1); -} -impl _ze_module_format_t { - pub const ZE_MODULE_FORMAT_FORCE_UINT32: _ze_module_format_t = _ze_module_format_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported module creation input formats"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_module_format_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported module creation input formats"] -pub use self::_ze_module_format_t as ze_module_format_t; -#[doc = ""] -#[doc = " @brief Specialization constants - User defined constants"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_constants_t { - #[doc = "< [in] Number of specialization constants."] - pub numConstants: u32, - #[doc = "< [in][range(0, numConstants)] Array of IDs that is sized to"] - #[doc = "< numConstants."] - pub pConstantIds: *const u32, - #[doc = "< [in][range(0, numConstants)] Array of pointers to values that is sized"] - #[doc = "< to numConstants."] - pub pConstantValues: *mut *const ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_module_constants_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_constants_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_module_constants_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_constants_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_constants_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_constants_t>())).numConstants as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_constants_t), - "::", - stringify!(numConstants) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_constants_t>())).pConstantIds as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_constants_t), - "::", - stringify!(pConstantIds) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_constants_t>())).pConstantValues as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_constants_t), - "::", - stringify!(pConstantValues) - ) - ); -} -#[doc = ""] -#[doc = " @brief Module descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] Module format passed in with pInputModule"] - pub format: ze_module_format_t, - #[doc = "< [in] size of input IL or ISA from pInputModule."] - pub inputSize: usize, - #[doc = "< [in] pointer to IL or ISA"] - pub pInputModule: *const u8, - #[doc = "< [in][optional] string containing compiler flags. 
Following options are supported."] - #[doc = "< - \"-ze-opt-disable\""] - #[doc = "< - Disable optimizations"] - #[doc = "< - \"-ze-opt-greater-than-4GB-buffer-required\""] - #[doc = "< - Use 64-bit offset calculations for buffers."] - #[doc = "< - \"-ze-opt-large-register-file\""] - #[doc = "< - Increase number of registers available to threads."] - #[doc = "< - \"-ze-opt-has-buffer-offset-arg\""] - #[doc = "< - Extend stateless to stateful optimization to more"] - #[doc = "< cases with the use of additional offset (e.g. 64-bit"] - #[doc = "< pointer to binding table with 32-bit offset)."] - #[doc = "< - \"-g\""] - #[doc = "< - Include debugging information."] - pub pBuildFlags: *const ::std::os::raw::c_char, - #[doc = "< [in][optional] pointer to specialization constants. Valid only for"] - #[doc = "< SPIR-V input. This must be set to nullptr if no specialization"] - #[doc = "< constants are provided."] - pub pConstants: *const ze_module_constants_t, -} -#[test] -fn bindgen_test_layout__ze_module_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_desc_t>(), - 56usize, - concat!("Size of: ", stringify!(_ze_module_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).format as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(format) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).inputSize as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(inputSize) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).pInputModule as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(pInputModule) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).pBuildFlags as *const _ as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(pBuildFlags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_desc_t>())).pConstants as *const _ as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_desc_t), - "::", - stringify!(pConstants) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a module on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Compiles the module for execution on the device."] - #[doc = " - The application must only use the module for the device, or its"] - #[doc = " sub-devices, which was provided during creation."] - #[doc = " - The module can be copied to other devices and contexts within the same"] - #[doc = " driver instance by using ::zeModuleGetNativeBinary."] - #[doc = " - A build log can optionally be returned to the caller. 
The caller is"] - #[doc = " responsible for destroying build log using ::zeModuleBuildLogDestroy."] - #[doc = " - The module descriptor constants are only supported for SPIR-V"] - #[doc = " specialization constants."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == desc->pInputModule`"] - #[doc = " + `nullptr == phModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `::ZE_MODULE_FORMAT_NATIVE < desc->format`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NATIVE_BINARY"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `0 == desc->inputSize`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_MODULE_BUILD_FAILURE"] - pub fn zeModuleCreate( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - desc: *const ze_module_desc_t, - phModule: *mut ze_module_handle_t, - phBuildLog: *mut ze_module_build_log_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys module"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must destroy all kernel and build log handles created"] - #[doc = " from the module before destroying the module itself."] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the module before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this module."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same module handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeModuleDestroy(hModule: ze_module_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Dynamically link modules together that share import/export linkage"] - #[doc = " dependencies."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Modules support import and export linkage for functions and global"] - #[doc = " variables."] - #[doc = " - Modules that have imports can be dynamically linked to export modules"] - #[doc = " that satisfy those import requirements."] - #[doc = " - Modules can have both import and export linkages."] - #[doc = " - Modules that do not have any imports or exports do not need to be"] - #[doc = " linked."] - #[doc = " - Modules cannot be partially linked. 
All modules needed to satisfy all"] - #[doc = " import dependencies for a module must be passed in or"] - #[doc = " ::ZE_RESULT_ERROR_MODULE_LINK_FAILURE will returned."] - #[doc = " - Modules with imports need to be linked before kernel objects can be"] - #[doc = " created from them."] - #[doc = " - Modules will only be linked once. A module can be used in multiple"] - #[doc = " link calls if it has exports but it's imports will not be re-linked."] - #[doc = " - Ambiguous dependencies, where multiple modules satisfy the import"] - #[doc = " dependencies for another module, is not allowed."] - #[doc = " - ModuleGetNativeBinary can be called on any module regardless of"] - #[doc = " whether it is linked or not."] - #[doc = " - A link log can optionally be returned to the caller. The caller is"] - #[doc = " responsible for destroying build log using ::zeModuleBuildLogDestroy."] - #[doc = " - See SPIR-V specification for linkage details."] - #[doc = " - The application must ensure the modules being linked were created on"] - #[doc = " the same context."] - #[doc = " - The application may call this function from simultaneous threads as"] - #[doc = " long as the import modules being linked are not the same."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == phModules`"] - #[doc = " - ::ZE_RESULT_ERROR_MODULE_LINK_FAILURE"] - pub fn zeModuleDynamicLink( - numModules: u32, - phModules: *mut ze_module_handle_t, - phLinkLog: *mut ze_module_build_log_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys module build log object"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The implementation of this function may immediately free all Host"] - #[doc = " allocations associated with this object."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same build log handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = " - This function can be called before or after ::zeModuleDestroy for the"] - #[doc = " associated module."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModuleBuildLog`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeModuleBuildLogDestroy(hModuleBuildLog: ze_module_build_log_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieves text string for build log."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The caller can pass nullptr for pBuildLog when querying only for size."] - #[doc = " - The caller must provide memory for build log."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModuleBuildLog`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == 
pSize`"] - pub fn zeModuleBuildLogGetString( - hModuleBuildLog: ze_module_build_log_handle_t, - pSize: *mut usize, - pBuildLog: *mut ::std::os::raw::c_char, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve native binary from Module."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The native binary output can be cached to disk and new modules can be"] - #[doc = " later constructed from the cached copy."] - #[doc = " - The native binary will retain debugging information that is associated"] - #[doc = " with a module."] - #[doc = " - The caller can pass nullptr for pModuleNativeBinary when querying only"] - #[doc = " for size."] - #[doc = " - The implementation will copy the native binary into a buffer supplied"] - #[doc = " by the caller."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pSize`"] - pub fn zeModuleGetNativeBinary( - hModule: ze_module_handle_t, - pSize: *mut usize, - pModuleNativeBinary: *mut u8, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve global variable pointer from Module."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may query global pointer from any module that either"] - #[doc = " exports or imports it."] - #[doc = " - The application must dynamically link a module that imports a global"] - #[doc = " before the global pointer can be queried from it."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pGlobalName`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_GLOBAL_NAME"] - pub fn zeModuleGetGlobalPointer( - hModule: ze_module_handle_t, - pGlobalName: *const ::std::os::raw::c_char, - pSize: *mut usize, - pptr: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve all kernel names in the module."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pCount`"] - pub fn zeModuleGetKernelNames( - hModule: ze_module_handle_t, - pCount: *mut u32, - pNames: *mut *const ::std::os::raw::c_char, - ) -> ze_result_t; -} -impl _ze_module_property_flags_t { - #[doc = "< Module has imports (i.e. 
imported global variables and/or kernels)."] - #[doc = "< See ::zeModuleDynamicLink."] - pub const ZE_MODULE_PROPERTY_FLAG_IMPORTS: _ze_module_property_flags_t = - _ze_module_property_flags_t(1); -} -impl _ze_module_property_flags_t { - pub const ZE_MODULE_PROPERTY_FLAG_FORCE_UINT32: _ze_module_property_flags_t = - _ze_module_property_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_module_property_flags_t> for _ze_module_property_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_module_property_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_module_property_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_module_property_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_module_property_flags_t> for _ze_module_property_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_module_property_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_module_property_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_module_property_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported module property flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_module_property_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported module property flags"] -pub use self::_ze_module_property_flags_t as ze_module_property_flags_t; -#[doc = ""] -#[doc = " @brief Module properties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 (none) or a valid combination of ::ze_module_property_flags_t"] - pub flags: ze_module_property_flags_t, -} -#[test] -fn bindgen_test_layout__ze_module_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_properties_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_module_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_properties_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_properties_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_properties_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_module_properties_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_properties_t), - "::", - stringify!(flags) - ) - ); -} -impl Default for _ze_module_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve module properties."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - 
::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pModuleProperties`"] - pub fn zeModuleGetProperties( - hModule: ze_module_handle_t, - pModuleProperties: *mut ze_module_properties_t, - ) -> ze_result_t; -} -impl _ze_kernel_flags_t { - #[doc = "< force all device allocations to be resident during execution"] - pub const ZE_KERNEL_FLAG_FORCE_RESIDENCY: _ze_kernel_flags_t = _ze_kernel_flags_t(1); -} -impl _ze_kernel_flags_t { - #[doc = "< application is responsible for all residency of device allocations."] - #[doc = "< driver may disable implicit residency management."] - pub const ZE_KERNEL_FLAG_EXPLICIT_RESIDENCY: _ze_kernel_flags_t = _ze_kernel_flags_t(2); -} -impl _ze_kernel_flags_t { - pub const ZE_KERNEL_FLAG_FORCE_UINT32: _ze_kernel_flags_t = _ze_kernel_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_kernel_flags_t> for _ze_kernel_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_kernel_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_kernel_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_kernel_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_kernel_flags_t> for _ze_kernel_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_kernel_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_kernel_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_kernel_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported kernel creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_kernel_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported kernel creation flags"] -pub use self::_ze_kernel_flags_t as ze_kernel_flags_t; -#[doc = ""] -#[doc = " @brief Kernel descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] creation flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_kernel_flags_t;"] - #[doc = "< default behavior may use driver-based residency."] - pub flags: ze_kernel_flags_t, - #[doc = "< [in] null-terminated name of kernel in module"] - pub pKernelName: *const ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout__ze_kernel_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_desc_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_kernel_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_desc_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { 
&(*(::std::ptr::null::<_ze_kernel_desc_t>())).pKernelName as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_desc_t), - "::", - stringify!(pKernelName) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Create a kernel from the module."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Modules that have unresolved imports need to be dynamically linked"] - #[doc = " before a kernel can be created from them. (See ::zeModuleDynamicLink)"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == desc->pKernelName`"] - #[doc = " + `nullptr == phKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x3 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_KERNEL_NAME"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_MODULE_UNLINKED"] - pub fn zeKernelCreate( - hModule: ze_module_handle_t, - desc: *const ze_kernel_desc_t, - phKernel: *mut ze_kernel_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys a kernel object"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the kernel before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this kernel."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same kernel handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeKernelDestroy(hKernel: ze_kernel_handle_t) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve a function pointer from a module by name"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The function pointer is unique for the device on which the module was"] - #[doc = " created."] - #[doc = " - The function pointer is no longer valid if module is destroyed."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hModule`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pFunctionName`"] - #[doc = " + `nullptr == pfnFunction`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_FUNCTION_NAME"] - pub fn zeModuleGetFunctionPointer( - hModule: ze_module_handle_t, - pFunctionName: *const ::std::os::raw::c_char, - pfnFunction: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc 
= ""] - #[doc = " @brief Set group size for a kernel on the current Host thread."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The group size will be used when a ::zeCommandListAppendLaunchKernel"] - #[doc = " variant is called."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_GROUP_SIZE_DIMENSION"] - pub fn zeKernelSetGroupSize( - hKernel: ze_kernel_handle_t, - groupSizeX: u32, - groupSizeY: u32, - groupSizeZ: u32, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Query a suggested group size for a kernel given a global size for each"] - #[doc = " dimension."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - This function ignores the group size that is set using"] - #[doc = " ::zeKernelSetGroupSize."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == groupSizeX`"] - #[doc = " + `nullptr == groupSizeY`"] - #[doc = " + `nullptr == groupSizeZ`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_GLOBAL_WIDTH_DIMENSION"] - pub fn zeKernelSuggestGroupSize( - hKernel: ze_kernel_handle_t, - globalSizeX: u32, - globalSizeY: u32, - globalSizeZ: u32, - groupSizeX: *mut u32, - groupSizeY: *mut u32, - groupSizeZ: *mut u32, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Query a suggested max group count for a cooperative kernel."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == totalGroupCount`"] - pub fn zeKernelSuggestMaxCooperativeGroupCount( - hKernel: ze_kernel_handle_t, - totalGroupCount: *mut u32, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Set kernel argument for a kernel on the current Host thread."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The argument values will be used when a"] - #[doc = " ::zeCommandListAppendLaunchKernel variant is called."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - 
::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_INDEX"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_KERNEL_ARGUMENT_SIZE"] - pub fn zeKernelSetArgumentValue( - hKernel: ze_kernel_handle_t, - argIndex: u32, - argSize: usize, - pArgValue: *const ::std::os::raw::c_void, - ) -> ze_result_t; -} -impl _ze_kernel_indirect_access_flags_t { - #[doc = "< Indicates that the kernel accesses host allocations indirectly."] - pub const ZE_KERNEL_INDIRECT_ACCESS_FLAG_HOST: _ze_kernel_indirect_access_flags_t = - _ze_kernel_indirect_access_flags_t(1); -} -impl _ze_kernel_indirect_access_flags_t { - #[doc = "< Indicates that the kernel accesses device allocations indirectly."] - pub const ZE_KERNEL_INDIRECT_ACCESS_FLAG_DEVICE: _ze_kernel_indirect_access_flags_t = - _ze_kernel_indirect_access_flags_t(2); -} -impl _ze_kernel_indirect_access_flags_t { - #[doc = "< Indicates that the kernel accesses shared allocations indirectly."] - pub const ZE_KERNEL_INDIRECT_ACCESS_FLAG_SHARED: _ze_kernel_indirect_access_flags_t = - _ze_kernel_indirect_access_flags_t(4); -} -impl _ze_kernel_indirect_access_flags_t { - pub const ZE_KERNEL_INDIRECT_ACCESS_FLAG_FORCE_UINT32: _ze_kernel_indirect_access_flags_t = - _ze_kernel_indirect_access_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_kernel_indirect_access_flags_t> for _ze_kernel_indirect_access_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_kernel_indirect_access_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_kernel_indirect_access_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_kernel_indirect_access_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_kernel_indirect_access_flags_t> for _ze_kernel_indirect_access_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_kernel_indirect_access_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_kernel_indirect_access_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_kernel_indirect_access_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Kernel indirect access flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_kernel_indirect_access_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Kernel indirect access flags"] -pub use self::_ze_kernel_indirect_access_flags_t as ze_kernel_indirect_access_flags_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Sets kernel indirect access flags."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application should specify which allocations will be indirectly"] - #[doc = " accessed by the kernel to allow driver to optimize which allocations"] - #[doc = " are made resident"] - #[doc = " - This function may **not** be called from simultaneous threads with the"] - #[doc = " same Kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x7 < flags`"] - pub fn zeKernelSetIndirectAccess( - hKernel: ze_kernel_handle_t, - flags: ze_kernel_indirect_access_flags_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc 
= " @brief Retrieve kernel indirect access flags."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - This function may be called from simultaneous threads with the same"] - #[doc = " Kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pFlags`"] - pub fn zeKernelGetIndirectAccess( - hKernel: ze_kernel_handle_t, - pFlags: *mut ze_kernel_indirect_access_flags_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve all declared kernel attributes (i.e. can be specified with"] - #[doc = " __attribute__ in runtime language)."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - This function may be called from simultaneous threads with the same"] - #[doc = " Kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pSize`"] - #[doc = " + `nullptr == pString`"] - pub fn zeKernelGetSourceAttributes( - hKernel: ze_kernel_handle_t, - pSize: *mut u32, - pString: *mut *mut ::std::os::raw::c_char, - ) -> ze_result_t; -} -impl _ze_cache_config_flags_t { - #[doc = "< Large SLM size"] - pub const ZE_CACHE_CONFIG_FLAG_LARGE_SLM: _ze_cache_config_flags_t = - _ze_cache_config_flags_t(1); -} -impl _ze_cache_config_flags_t { - #[doc = "< Large General Data size"] - pub const ZE_CACHE_CONFIG_FLAG_LARGE_DATA: _ze_cache_config_flags_t = - _ze_cache_config_flags_t(2); -} -impl _ze_cache_config_flags_t { - pub const ZE_CACHE_CONFIG_FLAG_FORCE_UINT32: _ze_cache_config_flags_t = - _ze_cache_config_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_cache_config_flags_t> for _ze_cache_config_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_cache_config_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_cache_config_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_cache_config_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_cache_config_flags_t> for _ze_cache_config_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_cache_config_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_cache_config_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_cache_config_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported Cache Config flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_cache_config_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported Cache Config flags"] -pub use self::_ze_cache_config_flags_t as ze_cache_config_flags_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Sets the preferred cache configuration for a kernel on the current"] - #[doc = " Host thread."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The cache configuration will be used when a"] - #[doc = " ::zeCommandListAppendLaunchKernel 
variant is called."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x3 < flags`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_FEATURE"] - pub fn zeKernelSetCacheConfig( - hKernel: ze_kernel_handle_t, - flags: ze_cache_config_flags_t, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Kernel universal unique id (UUID)"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_kernel_uuid_t { - #[doc = "< [out] opaque data representing a kernel UUID"] - pub kid: [u8; 16usize], - #[doc = "< [out] opaque data representing the kernel's module UUID"] - pub mid: [u8; 16usize], -} -#[test] -fn bindgen_test_layout__ze_kernel_uuid_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_uuid_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_kernel_uuid_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_uuid_t>(), - 1usize, - concat!("Alignment of ", stringify!(_ze_kernel_uuid_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_uuid_t>())).kid as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_uuid_t), - "::", - stringify!(kid) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_uuid_t>())).mid as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_uuid_t), - "::", - stringify!(mid) - ) - ); -} -#[doc = ""] -#[doc = " @brief Kernel properties"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] number of kernel arguments."] - pub numKernelArgs: u32, - #[doc = "< [out] required group size in the X dimension,"] - #[doc = "< or zero if there is no required group size"] - pub requiredGroupSizeX: u32, - #[doc = "< [out] required group size in the Y dimension,"] - #[doc = "< or zero if there is no required group size"] - pub requiredGroupSizeY: u32, - #[doc = "< [out] required group size in the Z dimension,"] - #[doc = "< or zero if there is no required group size"] - pub requiredGroupSizeZ: u32, - #[doc = "< [out] required number of subgroups per thread group,"] - #[doc = "< or zero if there is no required number of subgroups"] - pub requiredNumSubGroups: u32, - #[doc = "< [out] required subgroup size,"] - #[doc = "< or zero if there is no required subgroup size"] - pub requiredSubgroupSize: u32, - #[doc = "< [out] maximum subgroup size"] - pub maxSubgroupSize: u32, - #[doc = "< [out] maximum number of subgroups per thread group"] - pub maxNumSubgroups: u32, - #[doc = "< [out] local memory size used by each thread group"] - pub localMemSize: u32, - #[doc = "< [out] private memory size allocated by compiler used by each thread"] - pub privateMemSize: u32, - #[doc = "< [out] spill memory size allocated by compiler"] - pub spillMemSize: u32, - #[doc = "< [out] universal unique identifier."] - pub uuid: ze_kernel_uuid_t, -} -#[test] -fn 
bindgen_test_layout__ze_kernel_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_properties_t>(), - 96usize, - concat!("Size of: ", stringify!(_ze_kernel_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_properties_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_properties_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_properties_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_properties_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).numKernelArgs as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(numKernelArgs) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).requiredGroupSizeX as *const _ - as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(requiredGroupSizeX) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).requiredGroupSizeY as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(requiredGroupSizeY) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).requiredGroupSizeZ as *const _ - as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(requiredGroupSizeZ) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).requiredNumSubGroups as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(requiredNumSubGroups) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).requiredSubgroupSize as *const _ - as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(requiredSubgroupSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).maxSubgroupSize as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(maxSubgroupSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).maxNumSubgroups as *const _ as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(maxNumSubgroups) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).localMemSize as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(localMemSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).privateMemSize as *const _ as usize - }, - 52usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(privateMemSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_properties_t>())).spillMemSize as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - 
"::", - stringify!(spillMemSize) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_kernel_properties_t>())).uuid as *const _ as usize }, - 60usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_properties_t), - "::", - stringify!(uuid) - ) - ); -} -impl Default for _ze_kernel_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve kernel properties."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pKernelProperties`"] - pub fn zeKernelGetProperties( - hKernel: ze_kernel_handle_t, - pKernelProperties: *mut ze_kernel_properties_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Retrieve kernel name from Kernel."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The caller can pass nullptr for pName when querying only for size."] - #[doc = " - The implementation will copy the kernel name into a buffer supplied by"] - #[doc = " the caller."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pSize`"] - pub fn zeKernelGetName( - hKernel: ze_kernel_handle_t, - pSize: *mut usize, - pName: *mut ::std::os::raw::c_char, - ) -> ze_result_t; -} -#[doc = ""] -#[doc = " @brief Kernel dispatch group count."] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_group_count_t { - #[doc = "< [in] number of thread groups in X dimension"] - pub groupCountX: u32, - #[doc = "< [in] number of thread groups in Y dimension"] - pub groupCountY: u32, - #[doc = "< [in] number of thread groups in Z dimension"] - pub groupCountZ: u32, -} -#[test] -fn bindgen_test_layout__ze_group_count_t() { - assert_eq!( - ::std::mem::size_of::<_ze_group_count_t>(), - 12usize, - concat!("Size of: ", stringify!(_ze_group_count_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_group_count_t>(), - 4usize, - concat!("Alignment of ", stringify!(_ze_group_count_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_group_count_t>())).groupCountX as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_group_count_t), - "::", - stringify!(groupCountX) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_group_count_t>())).groupCountY as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(_ze_group_count_t), - "::", - stringify!(groupCountY) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_group_count_t>())).groupCountZ as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_group_count_t), - "::", - stringify!(groupCountZ) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief 
Launch kernel over one or more work groups."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the kernel and events are accessible by"] - #[doc = " the device on which the command list was created."] - #[doc = " - This may **only** be called for a command list created with command"] - #[doc = " queue group ordinal that supports compute."] - #[doc = " - The application must ensure the command list, kernel and events were"] - #[doc = " created on the same context."] - #[doc = " - This function may **not** be called from simultaneous threads with the"] - #[doc = " same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pLaunchFuncArgs`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendLaunchKernel( - hCommandList: ze_command_list_handle_t, - hKernel: ze_kernel_handle_t, - pLaunchFuncArgs: *const ze_group_count_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Launch kernel cooperatively over one or more work groups."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the kernel and events are accessible by"] - #[doc = " the device on which the command list was created."] - #[doc = " - This may **only** be called for a command list created with command"] - #[doc = " queue group ordinal that supports compute."] - #[doc = " - This may only be used for a command list that are submitted to command"] - #[doc = " queue with cooperative flag set."] - #[doc = " - The application must ensure the command list, kernel and events were"] - #[doc = " created on the same context."] - #[doc = " - This function may **not** be called from simultaneous threads with the"] - #[doc = " same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = " - Use ::zeKernelSuggestMaxCooperativeGroupCount to recommend max group"] - #[doc = " count for device for cooperative functions that device supports."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pLaunchFuncArgs`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendLaunchCooperativeKernel( - hCommandList: ze_command_list_handle_t, - hKernel: ze_kernel_handle_t, - pLaunchFuncArgs: *const ze_group_count_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Launch kernel over 
one or more work groups using indirect arguments."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the kernel and events are accessible by"] - #[doc = " the device on which the command list was created."] - #[doc = " - The application must ensure the launch arguments are visible to the"] - #[doc = " device on which the command list was created."] - #[doc = " - The implementation must not access the contents of the launch"] - #[doc = " arguments as they are free to be modified by either the Host or device"] - #[doc = " up until execution."] - #[doc = " - This may **only** be called for a command list created with command"] - #[doc = " queue group ordinal that supports compute."] - #[doc = " - The application must ensure the command list, kernel and events were"] - #[doc = " created, and the memory was allocated, on the same context."] - #[doc = " - This function may **not** be called from simultaneous threads with the"] - #[doc = " same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " + `nullptr == hKernel`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pLaunchArgumentsBuffer`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendLaunchKernelIndirect( - hCommandList: ze_command_list_handle_t, - hKernel: ze_kernel_handle_t, - pLaunchArgumentsBuffer: *const ze_group_count_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Launch multiple kernels over one or more work groups using an array of"] - #[doc = " indirect arguments."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the kernel and events are accessible by"] - #[doc = " the device on which the command list was created."] - #[doc = " - The application must ensure the array of launch arguments and count"] - #[doc = " buffer are visible to the device on which the command list was"] - #[doc = " created."] - #[doc = " - The implementation must not access the contents of the array of launch"] - #[doc = " arguments or count buffer as they are free to be modified by either"] - #[doc = " the Host or device up until execution."] - #[doc = " - This may **only** be called for a command list created with command"] - #[doc = " queue group ordinal that supports compute."] - #[doc = " - The application must enusre the command list, kernel and events were"] - #[doc = " created, and the memory was allocated, on the same context."] - #[doc = " - This function may **not** be called from simultaneous threads with the"] - #[doc = " same command list handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hCommandList`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == 
phKernels`"] - #[doc = " + `nullptr == pCountBuffer`"] - #[doc = " + `nullptr == pLaunchArgumentsBuffer`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SYNCHRONIZATION_OBJECT"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_SIZE"] - #[doc = " + `(nullptr == phWaitEvents) && (0 < numWaitEvents)`"] - pub fn zeCommandListAppendLaunchMultipleKernelsIndirect( - hCommandList: ze_command_list_handle_t, - numKernels: u32, - phKernels: *mut ze_kernel_handle_t, - pCountBuffer: *const u32, - pLaunchArgumentsBuffer: *const ze_group_count_t, - hSignalEvent: ze_event_handle_t, - numWaitEvents: u32, - phWaitEvents: *mut ze_event_handle_t, - ) -> ze_result_t; -} -impl _ze_module_program_exp_version_t { - #[doc = "< version 1.0"] - pub const ZE_MODULE_PROGRAM_EXP_VERSION_1_0: _ze_module_program_exp_version_t = - _ze_module_program_exp_version_t(65536); -} -impl _ze_module_program_exp_version_t { - #[doc = "< latest known version"] - pub const ZE_MODULE_PROGRAM_EXP_VERSION_CURRENT: _ze_module_program_exp_version_t = - _ze_module_program_exp_version_t(65536); -} -impl _ze_module_program_exp_version_t { - pub const ZE_MODULE_PROGRAM_EXP_VERSION_FORCE_UINT32: _ze_module_program_exp_version_t = - _ze_module_program_exp_version_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Module Program Extension Version(s)"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_module_program_exp_version_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Module Program Extension Version(s)"] -pub use self::_ze_module_program_exp_version_t as ze_module_program_exp_version_t; -#[doc = ""] -#[doc = " @brief Module extended descriptor to support multiple input modules."] -#[doc = ""] -#[doc = " @details"] -#[doc = " - Implementation must support ::ZE_experimental_module_program extension"] -#[doc = " - pInputModules, pBuildFlags, and pConstants from ::ze_module_desc_t is"] -#[doc = " ignored."] -#[doc = " - Format in ::ze_module_desc_t needs to be set to"] -#[doc = " ::ZE_MODULE_FORMAT_IL_SPIRV."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_program_exp_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] Count of input modules"] - pub count: u32, - #[doc = "< [in][range(0, count)] sizes of each input IL module in pInputModules."] - pub inputSizes: *const usize, - #[doc = "< [in][range(0, count)] pointer to an array of IL (e.g. SPIR-V modules)."] - #[doc = "< Valid only for SPIR-V input."] - pub pInputModules: *mut *const u8, - #[doc = "< [in][optional][range(0, count)] array of strings containing build"] - #[doc = "< flags. See pBuildFlags in ::ze_module_desc_t."] - pub pBuildFlags: *mut *const ::std::os::raw::c_char, - #[doc = "< [in][optional][range(0, count)] pointer to array of specialization"] - #[doc = "< constant strings. Valid only for SPIR-V input. 
This must be set to"] - #[doc = "< nullptr if no specialization constants are provided."] - pub pConstants: *mut *const ze_module_constants_t, -} -#[test] -fn bindgen_test_layout__ze_module_program_exp_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_program_exp_desc_t>(), - 56usize, - concat!("Size of: ", stringify!(_ze_module_program_exp_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_program_exp_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_program_exp_desc_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).pNext as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).count as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(count) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).inputSizes as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(inputSizes) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).pInputModules as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(pInputModules) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).pBuildFlags as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(pBuildFlags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_program_exp_desc_t>())).pConstants as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_program_exp_desc_t), - "::", - stringify!(pConstants) - ) - ); -} -impl _ze_raytracing_ext_version_t { - #[doc = "< version 1.0"] - pub const ZE_RAYTRACING_EXT_VERSION_1_0: _ze_raytracing_ext_version_t = - _ze_raytracing_ext_version_t(65536); -} -impl _ze_raytracing_ext_version_t { - #[doc = "< latest known version"] - pub const ZE_RAYTRACING_EXT_VERSION_CURRENT: _ze_raytracing_ext_version_t = - _ze_raytracing_ext_version_t(65536); -} -impl _ze_raytracing_ext_version_t { - pub const ZE_RAYTRACING_EXT_VERSION_FORCE_UINT32: _ze_raytracing_ext_version_t = - _ze_raytracing_ext_version_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Raytracing Extension Version(s)"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_raytracing_ext_version_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Raytracing Extension Version(s)"] -pub use self::_ze_raytracing_ext_version_t as ze_raytracing_ext_version_t; -impl _ze_device_raytracing_ext_flags_t { - #[doc = "< Supports rayquery"] - pub const ZE_DEVICE_RAYTRACING_EXT_FLAG_RAYQUERY: _ze_device_raytracing_ext_flags_t = - _ze_device_raytracing_ext_flags_t(1); -} -impl _ze_device_raytracing_ext_flags_t { - pub const ZE_DEVICE_RAYTRACING_EXT_FLAG_FORCE_UINT32: _ze_device_raytracing_ext_flags_t = - 
_ze_device_raytracing_ext_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_raytracing_ext_flags_t> for _ze_device_raytracing_ext_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_raytracing_ext_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_raytracing_ext_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_raytracing_ext_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_raytracing_ext_flags_t> for _ze_device_raytracing_ext_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_raytracing_ext_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_raytracing_ext_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_raytracing_ext_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported raytracing capability flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_raytracing_ext_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported raytracing capability flags"] -pub use self::_ze_device_raytracing_ext_flags_t as ze_device_raytracing_ext_flags_t; -#[doc = ""] -#[doc = " @brief Raytracing properties queried using ::zeDeviceGetModuleProperties"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure may be returned from ::zeDeviceGetModuleProperties, via"] -#[doc = " `pNext` member of ::ze_device_module_properties_t."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_raytracing_ext_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] 0 or a valid combination of ::ze_device_raytracing_ext_flags_t"] - pub flags: ze_device_raytracing_ext_flags_t, - #[doc = "< [out] Maximum number of BVH levels supported"] - pub maxBVHLevels: u32, -} -#[test] -fn bindgen_test_layout__ze_device_raytracing_ext_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_raytracing_ext_properties_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_device_raytracing_ext_properties_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_raytracing_ext_properties_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_raytracing_ext_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_raytracing_ext_properties_t>())).stype as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_raytracing_ext_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_raytracing_ext_properties_t>())).pNext as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_raytracing_ext_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_raytracing_ext_properties_t>())).flags as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_raytracing_ext_properties_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_raytracing_ext_properties_t>())).maxBVHLevels - as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_raytracing_ext_properties_t), - "::", - stringify!(maxBVHLevels) - ) - 
); -} -impl Default for _ze_device_raytracing_ext_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -impl _ze_raytracing_mem_alloc_ext_flags_t { - #[doc = "< reserved for future use"] - pub const ZE_RAYTRACING_MEM_ALLOC_EXT_FLAG_TBD: _ze_raytracing_mem_alloc_ext_flags_t = - _ze_raytracing_mem_alloc_ext_flags_t(1); -} -impl _ze_raytracing_mem_alloc_ext_flags_t { - pub const ZE_RAYTRACING_MEM_ALLOC_EXT_FLAG_FORCE_UINT32: _ze_raytracing_mem_alloc_ext_flags_t = - _ze_raytracing_mem_alloc_ext_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_raytracing_mem_alloc_ext_flags_t> - for _ze_raytracing_mem_alloc_ext_flags_t -{ - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_raytracing_mem_alloc_ext_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_raytracing_mem_alloc_ext_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_raytracing_mem_alloc_ext_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_raytracing_mem_alloc_ext_flags_t> - for _ze_raytracing_mem_alloc_ext_flags_t -{ - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_raytracing_mem_alloc_ext_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_raytracing_mem_alloc_ext_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_raytracing_mem_alloc_ext_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported raytracing memory allocation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_raytracing_mem_alloc_ext_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported raytracing memory allocation flags"] -pub use self::_ze_raytracing_mem_alloc_ext_flags_t as ze_raytracing_mem_alloc_ext_flags_t; -#[doc = ""] -#[doc = " @brief Raytracing memory allocation descriptor"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure must be passed to ::zeMemAllocShared or"] -#[doc = " ::zeMemAllocDevice, via `pNext` member of"] -#[doc = " ::ze_device_mem_alloc_desc_t, for any memory allocation that is to be"] -#[doc = " accessed by raytracing fixed-function of the device."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_raytracing_mem_alloc_ext_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying additional allocation controls."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_raytracing_mem_alloc_ext_flags_t;"] - #[doc = "< default behavior may use implicit driver-based heuristics."] - pub flags: ze_raytracing_mem_alloc_ext_flags_t, -} -#[test] -fn bindgen_test_layout__ze_raytracing_mem_alloc_ext_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_raytracing_mem_alloc_ext_desc_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_raytracing_mem_alloc_ext_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_raytracing_mem_alloc_ext_desc_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_raytracing_mem_alloc_ext_desc_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_raytracing_mem_alloc_ext_desc_t>())).stype as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_raytracing_mem_alloc_ext_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_raytracing_mem_alloc_ext_desc_t>())).pNext as 
*const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_raytracing_mem_alloc_ext_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_raytracing_mem_alloc_ext_desc_t>())).flags as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_raytracing_mem_alloc_ext_desc_t), - "::", - stringify!(flags) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Makes memory resident for the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the memory is resident before being"] - #[doc = " referenced by the device"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeContextMakeMemoryResident( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - ptr: *mut ::std::os::raw::c_void, - size: usize, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Allows memory to be evicted from the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the memory before it is evicted"] - #[doc = " - The application may free the memory without evicting; the memory is"] - #[doc = " implicitly evicted when freed."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeContextEvictMemory( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - ptr: *mut ::std::os::raw::c_void, - size: usize, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Makes image resident for the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the image is resident before being"] - #[doc = " referenced by the device"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " + `nullptr == hImage`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn 
zeContextMakeImageResident( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - hImage: ze_image_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Allows image to be evicted from the device."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the image before it is evicted"] - #[doc = " - The application may destroy the image without evicting; the image is"] - #[doc = " implicitly evicted when destroyed."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " + `nullptr == hImage`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeContextEvictImage( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - hImage: ze_image_handle_t, - ) -> ze_result_t; -} -impl _ze_sampler_address_mode_t { - #[doc = "< No coordinate modifications for out-of-bounds image access."] - pub const ZE_SAMPLER_ADDRESS_MODE_NONE: _ze_sampler_address_mode_t = - _ze_sampler_address_mode_t(0); -} -impl _ze_sampler_address_mode_t { - #[doc = "< Out-of-bounds coordinates are wrapped back around."] - pub const ZE_SAMPLER_ADDRESS_MODE_REPEAT: _ze_sampler_address_mode_t = - _ze_sampler_address_mode_t(1); -} -impl _ze_sampler_address_mode_t { - #[doc = "< Out-of-bounds coordinates are clamped to edge."] - pub const ZE_SAMPLER_ADDRESS_MODE_CLAMP: _ze_sampler_address_mode_t = - _ze_sampler_address_mode_t(2); -} -impl _ze_sampler_address_mode_t { - #[doc = "< Out-of-bounds coordinates are clamped to border color which is (0.0f,"] - #[doc = "< 0.0f, 0.0f, 0.0f) if image format swizzle contains alpha, otherwise"] - #[doc = "< (0.0f, 0.0f, 0.0f, 1.0f)."] - pub const ZE_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER: _ze_sampler_address_mode_t = - _ze_sampler_address_mode_t(3); -} -impl _ze_sampler_address_mode_t { - #[doc = "< Out-of-bounds coordinates are mirrored starting from edge."] - pub const ZE_SAMPLER_ADDRESS_MODE_MIRROR: _ze_sampler_address_mode_t = - _ze_sampler_address_mode_t(4); -} -impl _ze_sampler_address_mode_t { - pub const ZE_SAMPLER_ADDRESS_MODE_FORCE_UINT32: _ze_sampler_address_mode_t = - _ze_sampler_address_mode_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Sampler addressing modes"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_sampler_address_mode_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Sampler addressing modes"] -pub use self::_ze_sampler_address_mode_t as ze_sampler_address_mode_t; -impl _ze_sampler_filter_mode_t { - #[doc = "< No coordinate modifications for out of bounds image access."] - pub const ZE_SAMPLER_FILTER_MODE_NEAREST: _ze_sampler_filter_mode_t = - _ze_sampler_filter_mode_t(0); -} -impl _ze_sampler_filter_mode_t { - #[doc = "< Out-of-bounds coordinates are wrapped back around."] - pub const ZE_SAMPLER_FILTER_MODE_LINEAR: _ze_sampler_filter_mode_t = - _ze_sampler_filter_mode_t(1); -} -impl _ze_sampler_filter_mode_t { - pub const ZE_SAMPLER_FILTER_MODE_FORCE_UINT32: _ze_sampler_filter_mode_t = - _ze_sampler_filter_mode_t(2147483647); 
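For reference, a minimal sketch of how the sampler entry points deleted in this hunk were typically driven from Rust follows. It is hypothetical, not part of the patch: it assumes the level_zero-sys items removed here are in scope, that ze_sampler_handle_t is a raw pointer type, that ze_bool_t is an integer type, and that ze_structure_type_t exposes a ZE_STRUCTURE_TYPE_SAMPLER_DESC constant in the same newtype-enum style (none of those definitions appear in this hunk).

use std::ptr;

// Hypothetical usage sketch of the removed bindings: fill a _ze_sampler_desc_t,
// create the sampler with zeSamplerCreate, and hand the handle back to the caller.
// Assumes ze_result_t follows the same newtype pattern (PartialEq + an associated
// ZE_RESULT_SUCCESS constant) as the flag types shown in this hunk.
unsafe fn create_nearest_clamp_sampler(
    ctx: ze_context_handle_t,
    dev: ze_device_handle_t,
) -> Result<ze_sampler_handle_t, ze_result_t> {
    let desc = _ze_sampler_desc_t {
        stype: ze_structure_type_t::ZE_STRUCTURE_TYPE_SAMPLER_DESC, // assumed constant name
        pNext: ptr::null(),
        // Clamp out-of-bounds coordinates to the edge, no filtering between texels.
        addressMode: ze_sampler_address_mode_t::ZE_SAMPLER_ADDRESS_MODE_CLAMP,
        filterMode: ze_sampler_filter_mode_t::ZE_SAMPLER_FILTER_MODE_NEAREST,
        isNormalized: 0, // unnormalized (texel) coordinates; ze_bool_t assumed integral
    };
    let mut sampler: ze_sampler_handle_t = ptr::null_mut();
    let result = zeSamplerCreate(ctx, dev, &desc, &mut sampler);
    if result == ze_result_t::ZE_RESULT_SUCCESS {
        Ok(sampler) // caller releases it later with zeSamplerDestroy(sampler)
    } else {
        Err(result)
    }
}

The same calling pattern (fill a *_desc_t with stype/pNext, pass out-parameters by pointer, check the returned ze_result_t) applies to the kernel, residency, and virtual-memory entry points removed elsewhere in this hunk.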
-} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Sampler filtering modes"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_sampler_filter_mode_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Sampler filtering modes"] -pub use self::_ze_sampler_filter_mode_t as ze_sampler_filter_mode_t; -#[doc = ""] -#[doc = " @brief Sampler descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_sampler_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] Sampler addressing mode to determine how out-of-bounds"] - #[doc = "< coordinates are handled."] - pub addressMode: ze_sampler_address_mode_t, - #[doc = "< [in] Sampler filter mode to determine how samples are filtered."] - pub filterMode: ze_sampler_filter_mode_t, - #[doc = "< [in] Are coordinates normalized [0, 1] or not."] - pub isNormalized: ze_bool_t, -} -#[test] -fn bindgen_test_layout__ze_sampler_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_sampler_desc_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_sampler_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_sampler_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_sampler_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_sampler_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_sampler_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_sampler_desc_t>())).addressMode as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_desc_t), - "::", - stringify!(addressMode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_sampler_desc_t>())).filterMode as *const _ as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_desc_t), - "::", - stringify!(filterMode) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_sampler_desc_t>())).isNormalized as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_desc_t), - "::", - stringify!(isNormalized) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates sampler on the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use the sampler for the device, or its"] - #[doc = " sub-devices, which was provided during creation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phSampler`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `::ZE_SAMPLER_ADDRESS_MODE_MIRROR < desc->addressMode`"] - #[doc = " + `::ZE_SAMPLER_FILTER_MODE_LINEAR < desc->filterMode`"] - #[doc = " - 
::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - pub fn zeSamplerCreate( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - desc: *const ze_sampler_desc_t, - phSampler: *mut ze_sampler_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys sampler object"] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the sampler before it is deleted."] - #[doc = " - The implementation of this function may immediately free all Host and"] - #[doc = " Device allocations associated with this sampler."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same sampler handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hSampler`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zeSamplerDestroy(hSampler: ze_sampler_handle_t) -> ze_result_t; -} -impl _ze_memory_access_attribute_t { - #[doc = "< Indicates the memory page is inaccessible."] - pub const ZE_MEMORY_ACCESS_ATTRIBUTE_NONE: _ze_memory_access_attribute_t = - _ze_memory_access_attribute_t(0); -} -impl _ze_memory_access_attribute_t { - #[doc = "< Indicates the memory page supports read write access."] - pub const ZE_MEMORY_ACCESS_ATTRIBUTE_READWRITE: _ze_memory_access_attribute_t = - _ze_memory_access_attribute_t(1); -} -impl _ze_memory_access_attribute_t { - #[doc = "< Indicates the memory page supports read-only access."] - pub const ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY: _ze_memory_access_attribute_t = - _ze_memory_access_attribute_t(2); -} -impl _ze_memory_access_attribute_t { - pub const ZE_MEMORY_ACCESS_ATTRIBUTE_FORCE_UINT32: _ze_memory_access_attribute_t = - _ze_memory_access_attribute_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Virtual memory page access attributes"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_memory_access_attribute_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Virtual memory page access attributes"] -pub use self::_ze_memory_access_attribute_t as ze_memory_access_attribute_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Reserves pages in virtual address space."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use the memory allocation on the context for"] - #[doc = " which it was created."] - #[doc = " - The starting address and size must be page aligned. See"] - #[doc = " ::zeVirtualMemQueryPageSize."] - #[doc = " - If pStart is not null then implementation will attempt to reserve"] - #[doc = " starting from that address. 
If not available then will find another"] - #[doc = " suitable starting address."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The access attributes will default to none to indicate reservation is"] - #[doc = " inaccessible."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pStart`"] - #[doc = " + `nullptr == pptr`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - pub fn zeVirtualMemReserve( - hContext: ze_context_handle_t, - pStart: *const ::std::os::raw::c_void, - size: usize, - pptr: *mut *mut ::std::os::raw::c_void, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Free pages in a reserved virtual address range."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - Any existing virtual mappings for the range will be unmapped."] - #[doc = " - Physical allocations objects that were mapped to this range will not"] - #[doc = " be destroyed. These need to be destroyed explicitly."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT"] - pub fn zeVirtualMemFree( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Queries page size to use for aligning virtual memory reservations and"] - #[doc = " physical memory allocations."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == pagesize`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - pub fn zeVirtualMemQueryPageSize( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - size: usize, - pagesize: *mut usize, - ) -> ze_result_t; -} -impl _ze_physical_mem_flags_t { - #[doc = "< reserved for future use."] - pub const ZE_PHYSICAL_MEM_FLAG_TBD: _ze_physical_mem_flags_t = _ze_physical_mem_flags_t(1); -} -impl _ze_physical_mem_flags_t { - pub const ZE_PHYSICAL_MEM_FLAG_FORCE_UINT32: _ze_physical_mem_flags_t = - _ze_physical_mem_flags_t(2147483647); -} -impl 
::std::ops::BitOr<_ze_physical_mem_flags_t> for _ze_physical_mem_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_physical_mem_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_physical_mem_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_physical_mem_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_physical_mem_flags_t> for _ze_physical_mem_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_physical_mem_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_physical_mem_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_physical_mem_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported physical memory creation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_physical_mem_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported physical memory creation flags"] -pub use self::_ze_physical_mem_flags_t as ze_physical_mem_flags_t; -#[doc = ""] -#[doc = " @brief Physical memory descriptor"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_physical_mem_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] creation flags."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_physical_mem_flags_t."] - pub flags: ze_physical_mem_flags_t, - #[doc = "< [in] size in bytes to reserve; must be page aligned."] - pub size: usize, -} -#[test] -fn bindgen_test_layout__ze_physical_mem_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_physical_mem_desc_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_physical_mem_desc_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_physical_mem_desc_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_physical_mem_desc_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_physical_mem_desc_t>())).stype as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_physical_mem_desc_t>())).pNext as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_physical_mem_desc_t>())).flags as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_desc_t), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_physical_mem_desc_t>())).size as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_desc_t), - "::", - stringify!(size) - ) - ); -} -extern "C" { - #[doc = ""] - #[doc = " @brief Creates a physical memory object for the context."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must only use the physical memory object on the"] - #[doc = " context for which it was created."] - #[doc = " - The size must be page aligned. 
See ::zeVirtualMemQueryPageSize."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hDevice`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == desc`"] - #[doc = " + `nullptr == phPhysicalMemory`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `0x1 < desc->flags`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == desc->size`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT"] - pub fn zePhysicalMemCreate( - hContext: ze_context_handle_t, - hDevice: ze_device_handle_t, - desc: *mut ze_physical_mem_desc_t, - phPhysicalMemory: *mut ze_physical_mem_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Destroys a physical memory object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The application must ensure the device is not currently referencing"] - #[doc = " the physical memory object before it is deleted"] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same physical memory handle."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hPhysicalMemory`"] - #[doc = " - ::ZE_RESULT_ERROR_HANDLE_OBJECT_IN_USE"] - pub fn zePhysicalMemDestroy( - hContext: ze_context_handle_t, - hPhysicalMemory: ze_physical_mem_handle_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Maps pages in virtual address space to pages from physical memory"] - #[doc = " object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The virtual address range must have been reserved using"] - #[doc = " ::zeVirtualMemReserve."] - #[doc = " - The application must only use the mapped memory allocation on the"] - #[doc = " context for which it was created."] - #[doc = " - The virtual start address and size must be page aligned. 
See"] - #[doc = " ::zeVirtualMemQueryPageSize."] - #[doc = " - The application should use, for the starting address and size, the"] - #[doc = " same size alignment used for the physical allocation."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " + `nullptr == hPhysicalMemory`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `::ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY < access`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT"] - pub fn zeVirtualMemMap( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - hPhysicalMemory: ze_physical_mem_handle_t, - offset: usize, - access: ze_memory_access_attribute_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Unmaps pages in virtual address space from pages from a physical"] - #[doc = " memory object."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The page access attributes for virtual address range will revert back"] - #[doc = " to none."] - #[doc = " - The application may call this function from simultaneous threads."] - #[doc = " - The implementation of this function must be thread-safe."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_HOST_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_OUT_OF_DEVICE_MEMORY"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT - \"Address must be page aligned\""] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " + Size must be page aligned"] - pub fn zeVirtualMemUnmap( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Set memory access attributes for a virtual address range."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - This function may be called from simultaneous threads with the same"] - #[doc = " function handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_ENUMERATION"] - #[doc = " + `::ZE_MEMORY_ACCESS_ATTRIBUTE_READONLY < access`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT - \"Address must be page aligned\""] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == 
size`"] - #[doc = " + Size must be page aligned"] - pub fn zeVirtualMemSetAccessAttribute( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - access: ze_memory_access_attribute_t, - ) -> ze_result_t; -} -extern "C" { - #[doc = ""] - #[doc = " @brief Get memory access attribute for a virtual address range."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - If size and outSize are equal then the pages in the specified virtual"] - #[doc = " address range have the same access attributes."] - #[doc = " - This function may be called from simultaneous threads with the same"] - #[doc = " function handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hContext`"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_POINTER"] - #[doc = " + `nullptr == ptr`"] - #[doc = " + `nullptr == access`"] - #[doc = " + `nullptr == outSize`"] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_ALIGNMENT - \"Address must be page aligned\""] - #[doc = " - ::ZE_RESULT_ERROR_UNSUPPORTED_SIZE"] - #[doc = " + `0 == size`"] - #[doc = " + Size must be page aligned"] - pub fn zeVirtualMemGetAccessAttribute( - hContext: ze_context_handle_t, - ptr: *const ::std::os::raw::c_void, - size: usize, - access: *mut ze_memory_access_attribute_t, - outSize: *mut usize, - ) -> ze_result_t; -} -impl _ze_float_atomics_ext_version_t { - #[doc = "< version 1.0"] - pub const ZE_FLOAT_ATOMICS_EXT_VERSION_1_0: _ze_float_atomics_ext_version_t = - _ze_float_atomics_ext_version_t(65536); -} -impl _ze_float_atomics_ext_version_t { - #[doc = "< latest known version"] - pub const ZE_FLOAT_ATOMICS_EXT_VERSION_CURRENT: _ze_float_atomics_ext_version_t = - _ze_float_atomics_ext_version_t(65536); -} -impl _ze_float_atomics_ext_version_t { - pub const ZE_FLOAT_ATOMICS_EXT_VERSION_FORCE_UINT32: _ze_float_atomics_ext_version_t = - _ze_float_atomics_ext_version_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Floating-Point Atomics Extension Version(s)"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_float_atomics_ext_version_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Floating-Point Atomics Extension Version(s)"] -pub use self::_ze_float_atomics_ext_version_t as ze_float_atomics_ext_version_t; -impl _ze_device_fp_atomic_ext_flags_t { - #[doc = "< Supports atomic load, store, and exchange"] - pub const ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_LOAD_STORE: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(1); -} -impl _ze_device_fp_atomic_ext_flags_t { - #[doc = "< Supports atomic add and subtract"] - pub const ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_ADD: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(2); -} -impl _ze_device_fp_atomic_ext_flags_t { - #[doc = "< Supports atomic min and max"] - pub const ZE_DEVICE_FP_ATOMIC_EXT_FLAG_GLOBAL_MIN_MAX: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(4); -} -impl _ze_device_fp_atomic_ext_flags_t { - #[doc = "< Supports atomic load, store, and exchange"] - pub const ZE_DEVICE_FP_ATOMIC_EXT_FLAG_LOCAL_LOAD_STORE: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(65536); -} -impl _ze_device_fp_atomic_ext_flags_t { - #[doc = "< Supports atomic add and subtract"] - pub const 
ZE_DEVICE_FP_ATOMIC_EXT_FLAG_LOCAL_ADD: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(131072); -} -impl _ze_device_fp_atomic_ext_flags_t { - #[doc = "< Supports atomic min and max"] - pub const ZE_DEVICE_FP_ATOMIC_EXT_FLAG_LOCAL_MIN_MAX: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(262144); -} -impl _ze_device_fp_atomic_ext_flags_t { - pub const ZE_DEVICE_FP_ATOMIC_EXT_FLAG_FORCE_UINT32: _ze_device_fp_atomic_ext_flags_t = - _ze_device_fp_atomic_ext_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_device_fp_atomic_ext_flags_t> for _ze_device_fp_atomic_ext_flags_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_device_fp_atomic_ext_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_device_fp_atomic_ext_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_device_fp_atomic_ext_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_device_fp_atomic_ext_flags_t> for _ze_device_fp_atomic_ext_flags_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_device_fp_atomic_ext_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_device_fp_atomic_ext_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_device_fp_atomic_ext_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported floating-point atomic capability flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_device_fp_atomic_ext_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported floating-point atomic capability flags"] -pub use self::_ze_device_fp_atomic_ext_flags_t as ze_device_fp_atomic_ext_flags_t; -#[doc = ""] -#[doc = " @brief Device floating-point atomic properties queried using"] -#[doc = " ::zeDeviceGetModuleProperties"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure may be returned from ::zeDeviceGetModuleProperties, via"] -#[doc = " `pNext` member of ::ze_device_module_properties_t."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_float_atomic_ext_properties_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in,out][optional] pointer to extension-specific structure"] - pub pNext: *mut ::std::os::raw::c_void, - #[doc = "< [out] Capabilities for half-precision floating-point atomic operations"] - pub fp16Flags: ze_device_fp_atomic_ext_flags_t, - #[doc = "< [out] Capabilities for single-precision floating-point atomic"] - #[doc = "< operations"] - pub fp32Flags: ze_device_fp_atomic_ext_flags_t, - #[doc = "< [out] Capabilities for double-precision floating-point atomic"] - #[doc = "< operations"] - pub fp64Flags: ze_device_fp_atomic_ext_flags_t, -} -#[test] -fn bindgen_test_layout__ze_float_atomic_ext_properties_t() { - assert_eq!( - ::std::mem::size_of::<_ze_float_atomic_ext_properties_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_float_atomic_ext_properties_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_float_atomic_ext_properties_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_float_atomic_ext_properties_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_float_atomic_ext_properties_t>())).stype as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_float_atomic_ext_properties_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_float_atomic_ext_properties_t>())).pNext as *const _ as 
usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_float_atomic_ext_properties_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_float_atomic_ext_properties_t>())).fp16Flags as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_float_atomic_ext_properties_t), - "::", - stringify!(fp16Flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_float_atomic_ext_properties_t>())).fp32Flags as *const _ - as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(_ze_float_atomic_ext_properties_t), - "::", - stringify!(fp32Flags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_float_atomic_ext_properties_t>())).fp64Flags as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_float_atomic_ext_properties_t), - "::", - stringify!(fp64Flags) - ) - ); -} -impl Default for _ze_float_atomic_ext_properties_t { - fn default() -> Self { - unsafe { ::std::mem::zeroed() } - } -} -impl _ze_global_offset_exp_version_t { - #[doc = "< version 1.0"] - pub const ZE_GLOBAL_OFFSET_EXP_VERSION_1_0: _ze_global_offset_exp_version_t = - _ze_global_offset_exp_version_t(65536); -} -impl _ze_global_offset_exp_version_t { - #[doc = "< latest known version"] - pub const ZE_GLOBAL_OFFSET_EXP_VERSION_CURRENT: _ze_global_offset_exp_version_t = - _ze_global_offset_exp_version_t(65536); -} -impl _ze_global_offset_exp_version_t { - pub const ZE_GLOBAL_OFFSET_EXP_VERSION_FORCE_UINT32: _ze_global_offset_exp_version_t = - _ze_global_offset_exp_version_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Global Offset Extension Version(s)"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_global_offset_exp_version_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Global Offset Extension Version(s)"] -pub use self::_ze_global_offset_exp_version_t as ze_global_offset_exp_version_t; -extern "C" { - #[doc = ""] - #[doc = " @brief Set global work offset for a kernel on the current Host thread."] - #[doc = ""] - #[doc = " @details"] - #[doc = " - The global work offset will be used when"] - #[doc = " a ::zeCommandListAppendLaunchKernel() variant is called."] - #[doc = " - The application must **not** call this function from simultaneous"] - #[doc = " threads with the same kernel handle."] - #[doc = " - The implementation of this function should be lock-free."] - #[doc = ""] - #[doc = " @returns"] - #[doc = " - ::ZE_RESULT_SUCCESS"] - #[doc = " - ::ZE_RESULT_ERROR_UNINITIALIZED"] - #[doc = " - ::ZE_RESULT_ERROR_DEVICE_LOST"] - #[doc = " - ::ZE_RESULT_ERROR_INVALID_NULL_HANDLE"] - #[doc = " + `nullptr == hKernel`"] - pub fn zeKernelSetGlobalOffsetExp( - hKernel: ze_kernel_handle_t, - offsetX: u32, - offsetY: u32, - offsetZ: u32, - ) -> ze_result_t; -} -impl _ze_relaxed_allocation_limits_exp_version_t { - #[doc = "< version 1.0"] - pub const ZE_RELAXED_ALLOCATION_LIMITS_EXP_VERSION_1_0: - _ze_relaxed_allocation_limits_exp_version_t = - _ze_relaxed_allocation_limits_exp_version_t(65536); -} -impl _ze_relaxed_allocation_limits_exp_version_t { - #[doc = "< latest known version"] - pub const ZE_RELAXED_ALLOCATION_LIMITS_EXP_VERSION_CURRENT: - _ze_relaxed_allocation_limits_exp_version_t = - _ze_relaxed_allocation_limits_exp_version_t(65536); -} -impl _ze_relaxed_allocation_limits_exp_version_t { - pub const ZE_RELAXED_ALLOCATION_LIMITS_EXP_VERSION_FORCE_UINT32: - _ze_relaxed_allocation_limits_exp_version_t = - 
_ze_relaxed_allocation_limits_exp_version_t(2147483647); -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Relaxed Allocation Limits Extension Version(s)"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_relaxed_allocation_limits_exp_version_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Relaxed Allocation Limits Extension Version(s)"] -pub use self::_ze_relaxed_allocation_limits_exp_version_t as ze_relaxed_allocation_limits_exp_version_t; -impl _ze_relaxed_allocation_limits_exp_flags_t { - #[doc = "< Allocation size may exceed ::ze_device_properties_t.maxMemAllocSize"] - pub const ZE_RELAXED_ALLOCATION_LIMITS_EXP_FLAG_MAX_SIZE: - _ze_relaxed_allocation_limits_exp_flags_t = _ze_relaxed_allocation_limits_exp_flags_t(1); -} -impl _ze_relaxed_allocation_limits_exp_flags_t { - pub const ZE_RELAXED_ALLOCATION_LIMITS_EXP_FLAG_FORCE_UINT32: - _ze_relaxed_allocation_limits_exp_flags_t = - _ze_relaxed_allocation_limits_exp_flags_t(2147483647); -} -impl ::std::ops::BitOr<_ze_relaxed_allocation_limits_exp_flags_t> - for _ze_relaxed_allocation_limits_exp_flags_t -{ - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - _ze_relaxed_allocation_limits_exp_flags_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for _ze_relaxed_allocation_limits_exp_flags_t { - #[inline] - fn bitor_assign(&mut self, rhs: _ze_relaxed_allocation_limits_exp_flags_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd<_ze_relaxed_allocation_limits_exp_flags_t> - for _ze_relaxed_allocation_limits_exp_flags_t -{ - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - _ze_relaxed_allocation_limits_exp_flags_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for _ze_relaxed_allocation_limits_exp_flags_t { - #[inline] - fn bitand_assign(&mut self, rhs: _ze_relaxed_allocation_limits_exp_flags_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[doc = ""] -#[doc = " @brief Supported relaxed memory allocation flags"] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct _ze_relaxed_allocation_limits_exp_flags_t(pub ::std::os::raw::c_uint); -#[doc = ""] -#[doc = " @brief Supported relaxed memory allocation flags"] -pub use self::_ze_relaxed_allocation_limits_exp_flags_t as ze_relaxed_allocation_limits_exp_flags_t; -#[doc = ""] -#[doc = " @brief Relaxed limits memory allocation descriptor"] -#[doc = ""] -#[doc = " @details"] -#[doc = " - This structure may be passed to ::zeMemAllocShared or"] -#[doc = " ::zeMemAllocDevice, via `pNext` member of"] -#[doc = " ::ze_device_mem_alloc_desc_t."] -#[doc = " - This structure may also be passed to ::zeMemAllocHost, via `pNext`"] -#[doc = " member of ::ze_host_mem_alloc_desc_t."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_relaxed_allocation_limits_exp_desc_t { - #[doc = "< [in] type of this structure"] - pub stype: ze_structure_type_t, - #[doc = "< [in][optional] pointer to extension-specific structure"] - pub pNext: *const ::std::os::raw::c_void, - #[doc = "< [in] flags specifying allocation limits to relax."] - #[doc = "< must be 0 (default) or a valid combination of ::ze_relaxed_allocation_limits_exp_flags_t;"] - pub flags: ze_relaxed_allocation_limits_exp_flags_t, -} -#[test] -fn bindgen_test_layout__ze_relaxed_allocation_limits_exp_desc_t() { - assert_eq!( - ::std::mem::size_of::<_ze_relaxed_allocation_limits_exp_desc_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_relaxed_allocation_limits_exp_desc_t) - ) - ); - assert_eq!( - 
::std::mem::align_of::<_ze_relaxed_allocation_limits_exp_desc_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_relaxed_allocation_limits_exp_desc_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_relaxed_allocation_limits_exp_desc_t>())).stype as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_relaxed_allocation_limits_exp_desc_t), - "::", - stringify!(stype) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_relaxed_allocation_limits_exp_desc_t>())).pNext as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_relaxed_allocation_limits_exp_desc_t), - "::", - stringify!(pNext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_relaxed_allocation_limits_exp_desc_t>())).flags as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_relaxed_allocation_limits_exp_desc_t), - "::", - stringify!(flags) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeInit"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_init_params_t { - pub pflags: *mut ze_init_flags_t, -} -#[test] -fn bindgen_test_layout__ze_init_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_init_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_init_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_init_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_init_params_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_init_params_t>())).pflags as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_init_params_t), - "::", - stringify!(pflags) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeInit"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_init_params_t = _ze_init_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeInit"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnInitCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_init_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Global callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_global_callbacks_t { - pub pfnInitCb: ze_pfnInitCb_t, -} -#[test] -fn bindgen_test_layout__ze_global_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_global_callbacks_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_global_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_global_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_global_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_global_callbacks_t>())).pfnInitCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_global_callbacks_t), - "::", - stringify!(pfnInitCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table 
of Global callback functions pointers"] -pub type ze_global_callbacks_t = _ze_global_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGet"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_get_params_t { - pub ppCount: *mut *mut u32, - pub pphDrivers: *mut *mut ze_driver_handle_t, -} -#[test] -fn bindgen_test_layout__ze_driver_get_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_get_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_driver_get_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_get_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_driver_get_params_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_driver_get_params_t>())).ppCount as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_params_t>())).pphDrivers as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_params_t), - "::", - stringify!(pphDrivers) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGet"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_driver_get_params_t = _ze_driver_get_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDriverGet"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDriverGetCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_driver_get_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetApiVersion"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_get_api_version_params_t { - pub phDriver: *mut ze_driver_handle_t, - pub pversion: *mut *mut ze_api_version_t, -} -#[test] -fn bindgen_test_layout__ze_driver_get_api_version_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_get_api_version_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_driver_get_api_version_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_get_api_version_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_driver_get_api_version_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_api_version_params_t>())).phDriver as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_api_version_params_t), - "::", - stringify!(phDriver) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_api_version_params_t>())).pversion as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_driver_get_api_version_params_t), - "::", - stringify!(pversion) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetApiVersion"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_driver_get_api_version_params_t = _ze_driver_get_api_version_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDriverGetApiVersion"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDriverGetApiVersionCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_driver_get_api_version_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_get_properties_params_t { - pub phDriver: *mut ze_driver_handle_t, - pub ppDriverProperties: *mut *mut ze_driver_properties_t, -} -#[test] -fn bindgen_test_layout__ze_driver_get_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_get_properties_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_driver_get_properties_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_get_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_driver_get_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_properties_params_t>())).phDriver as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_properties_params_t), - "::", - stringify!(phDriver) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_properties_params_t>())).ppDriverProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_properties_params_t), - "::", - stringify!(ppDriverProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_driver_get_properties_params_t = _ze_driver_get_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDriverGetProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDriverGetPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_driver_get_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetIpcProperties"] -#[doc = " @details Each entry is a 
pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_get_ipc_properties_params_t { - pub phDriver: *mut ze_driver_handle_t, - pub ppIpcProperties: *mut *mut ze_driver_ipc_properties_t, -} -#[test] -fn bindgen_test_layout__ze_driver_get_ipc_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_get_ipc_properties_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_driver_get_ipc_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_get_ipc_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_driver_get_ipc_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_ipc_properties_params_t>())).phDriver as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_ipc_properties_params_t), - "::", - stringify!(phDriver) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_ipc_properties_params_t>())).ppIpcProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_ipc_properties_params_t), - "::", - stringify!(ppIpcProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetIpcProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_driver_get_ipc_properties_params_t = _ze_driver_get_ipc_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDriverGetIpcProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDriverGetIpcPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_driver_get_ipc_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetExtensionProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_driver_get_extension_properties_params_t { - pub phDriver: *mut ze_driver_handle_t, - pub ppCount: *mut *mut u32, - pub ppExtensionProperties: *mut *mut ze_driver_extension_properties_t, -} -#[test] -fn bindgen_test_layout__ze_driver_get_extension_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_get_extension_properties_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_driver_get_extension_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_get_extension_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_driver_get_extension_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_extension_properties_params_t>())).phDriver - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_extension_properties_params_t), - "::", - 
stringify!(phDriver) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_extension_properties_params_t>())).ppCount - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_extension_properties_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_get_extension_properties_params_t>())) - .ppExtensionProperties as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_get_extension_properties_params_t), - "::", - stringify!(ppExtensionProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDriverGetExtensionProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_driver_get_extension_properties_params_t = _ze_driver_get_extension_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDriverGetExtensionProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDriverGetExtensionPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_driver_get_extension_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Driver callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_driver_callbacks_t { - pub pfnGetCb: ze_pfnDriverGetCb_t, - pub pfnGetApiVersionCb: ze_pfnDriverGetApiVersionCb_t, - pub pfnGetPropertiesCb: ze_pfnDriverGetPropertiesCb_t, - pub pfnGetIpcPropertiesCb: ze_pfnDriverGetIpcPropertiesCb_t, - pub pfnGetExtensionPropertiesCb: ze_pfnDriverGetExtensionPropertiesCb_t, -} -#[test] -fn bindgen_test_layout__ze_driver_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_driver_callbacks_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_driver_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_driver_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_driver_callbacks_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_driver_callbacks_t>())).pfnGetCb as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_callbacks_t), - "::", - stringify!(pfnGetCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_callbacks_t>())).pfnGetApiVersionCb as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_callbacks_t), - "::", - stringify!(pfnGetApiVersionCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_callbacks_t>())).pfnGetPropertiesCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_callbacks_t), - "::", - stringify!(pfnGetPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_driver_callbacks_t>())).pfnGetIpcPropertiesCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_callbacks_t), - "::", - stringify!(pfnGetIpcPropertiesCb) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_driver_callbacks_t>())).pfnGetExtensionPropertiesCb - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_driver_callbacks_t), - "::", - stringify!(pfnGetExtensionPropertiesCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Driver callback functions pointers"] -pub type ze_driver_callbacks_t = _ze_driver_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGet"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_params_t { - pub phDriver: *mut ze_driver_handle_t, - pub ppCount: *mut *mut u32, - pub pphDevices: *mut *mut ze_device_handle_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_device_get_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_get_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_params_t>())).phDriver as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_params_t), - "::", - stringify!(phDriver) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_get_params_t>())).ppCount as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_params_t>())).pphDevices as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_params_t), - "::", - stringify!(pphDevices) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGet"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_params_t = _ze_device_get_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGet"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetSubDevices"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_sub_devices_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppCount: *mut *mut u32, - pub pphSubdevices: *mut *mut ze_device_handle_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_sub_devices_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_sub_devices_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_device_get_sub_devices_params_t)) - ); - assert_eq!( - 
::std::mem::align_of::<_ze_device_get_sub_devices_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_sub_devices_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_sub_devices_params_t>())).phDevice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_sub_devices_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_sub_devices_params_t>())).ppCount as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_sub_devices_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_sub_devices_params_t>())).pphSubdevices - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_sub_devices_params_t), - "::", - stringify!(pphSubdevices) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetSubDevices"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_sub_devices_params_t = _ze_device_get_sub_devices_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetSubDevices"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetSubDevicesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_sub_devices_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppDeviceProperties: *mut *mut ze_device_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_properties_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_device_get_properties_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_properties_params_t>())).phDevice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_properties_params_t>())).ppDeviceProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_properties_params_t), - "::", - stringify!(ppDeviceProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability 
to modify the parameter's value"] -pub type ze_device_get_properties_params_t = _ze_device_get_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetComputeProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_compute_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppComputeProperties: *mut *mut ze_device_compute_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_compute_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_compute_properties_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_compute_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_compute_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_compute_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_compute_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_compute_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_compute_properties_params_t>())) - .ppComputeProperties as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_compute_properties_params_t), - "::", - stringify!(ppComputeProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetComputeProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_compute_properties_params_t = _ze_device_get_compute_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetComputeProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetComputePropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_compute_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetModuleProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, 
Copy, Clone)] -pub struct _ze_device_get_module_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppModuleProperties: *mut *mut ze_device_module_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_module_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_module_properties_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_module_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_module_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_module_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_module_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_module_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_module_properties_params_t>())).ppModuleProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_module_properties_params_t), - "::", - stringify!(ppModuleProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetModuleProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_module_properties_params_t = _ze_device_get_module_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetModuleProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetModulePropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_module_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetCommandQueueGroupProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_command_queue_group_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppCount: *mut *mut u32, - pub ppCommandQueueGroupProperties: *mut *mut ze_command_queue_group_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_command_queue_group_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_command_queue_group_properties_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_command_queue_group_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_command_queue_group_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_command_queue_group_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_command_queue_group_properties_params_t>())) - .phDevice as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_command_queue_group_properties_params_t), - "::", - 
stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_command_queue_group_properties_params_t>())) - .ppCount as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_command_queue_group_properties_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_command_queue_group_properties_params_t>())) - .ppCommandQueueGroupProperties as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_command_queue_group_properties_params_t), - "::", - stringify!(ppCommandQueueGroupProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetCommandQueueGroupProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_command_queue_group_properties_params_t = - _ze_device_get_command_queue_group_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetCommandQueueGroupProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetCommandQueueGroupPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_command_queue_group_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetMemoryProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_memory_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppCount: *mut *mut u32, - pub ppMemProperties: *mut *mut ze_device_memory_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_memory_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_memory_properties_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_memory_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_memory_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_memory_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_memory_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_memory_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_memory_properties_params_t>())).ppCount - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_memory_properties_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_memory_properties_params_t>())).ppMemProperties - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_memory_properties_params_t), - "::", - stringify!(ppMemProperties) - ) - ); -} -#[doc = 
""] -#[doc = " @brief Callback function parameters for zeDeviceGetMemoryProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_memory_properties_params_t = _ze_device_get_memory_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetMemoryProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetMemoryPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_memory_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetMemoryAccessProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_memory_access_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppMemAccessProperties: *mut *mut ze_device_memory_access_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_memory_access_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_memory_access_properties_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_memory_access_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_memory_access_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_memory_access_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_memory_access_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_memory_access_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_memory_access_properties_params_t>())) - .ppMemAccessProperties as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_memory_access_properties_params_t), - "::", - stringify!(ppMemAccessProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetMemoryAccessProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_memory_access_properties_params_t = - _ze_device_get_memory_access_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetMemoryAccessProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetMemoryAccessPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_memory_access_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - 
ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetCacheProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_cache_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppCount: *mut *mut u32, - pub ppCacheProperties: *mut *mut ze_device_cache_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_cache_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_cache_properties_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_cache_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_cache_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_cache_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_cache_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_cache_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_cache_properties_params_t>())).ppCount as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_cache_properties_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_cache_properties_params_t>())).ppCacheProperties - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_cache_properties_params_t), - "::", - stringify!(ppCacheProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetCacheProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_cache_properties_params_t = _ze_device_get_cache_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetCacheProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetCachePropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_cache_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetImageProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_image_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppImageProperties: *mut *mut ze_device_image_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_image_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_image_properties_params_t>(), - 16usize, - concat!( - "Size of: ", - 
stringify!(_ze_device_get_image_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_image_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_image_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_image_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_image_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_image_properties_params_t>())).ppImageProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_image_properties_params_t), - "::", - stringify!(ppImageProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetImageProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_image_properties_params_t = _ze_device_get_image_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetImageProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetImagePropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_image_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetExternalMemoryProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_external_memory_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub ppExternalMemoryProperties: *mut *mut ze_device_external_memory_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_external_memory_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_external_memory_properties_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_external_memory_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_external_memory_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_external_memory_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_external_memory_properties_params_t>())).phDevice - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_external_memory_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_external_memory_properties_params_t>())) - .ppExternalMemoryProperties as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_external_memory_properties_params_t), - "::", - stringify!(ppExternalMemoryProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetExternalMemoryProperties"] -#[doc = " 
@details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_external_memory_properties_params_t = - _ze_device_get_external_memory_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetExternalMemoryProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetExternalMemoryPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_external_memory_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetP2PProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_p2_p_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub phPeerDevice: *mut ze_device_handle_t, - pub ppP2PProperties: *mut *mut ze_device_p2p_properties_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_p2_p_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_p2_p_properties_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_device_get_p2_p_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_p2_p_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_get_p2_p_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_p2_p_properties_params_t>())).phDevice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_p2_p_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_p2_p_properties_params_t>())).phPeerDevice - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_p2_p_properties_params_t), - "::", - stringify!(phPeerDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_p2_p_properties_params_t>())).ppP2PProperties - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_p2_p_properties_params_t), - "::", - stringify!(ppP2PProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetP2PProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_p2_p_properties_params_t = _ze_device_get_p2_p_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetP2PProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetP2PPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut 
ze_device_get_p2_p_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceCanAccessPeer"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_can_access_peer_params_t { - pub phDevice: *mut ze_device_handle_t, - pub phPeerDevice: *mut ze_device_handle_t, - pub pvalue: *mut *mut ze_bool_t, -} -#[test] -fn bindgen_test_layout__ze_device_can_access_peer_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_can_access_peer_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_device_can_access_peer_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_can_access_peer_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_device_can_access_peer_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_can_access_peer_params_t>())).phDevice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_can_access_peer_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_can_access_peer_params_t>())).phPeerDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_can_access_peer_params_t), - "::", - stringify!(phPeerDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_can_access_peer_params_t>())).pvalue as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_can_access_peer_params_t), - "::", - stringify!(pvalue) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceCanAccessPeer"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_can_access_peer_params_t = _ze_device_can_access_peer_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceCanAccessPeer"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceCanAccessPeerCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_can_access_peer_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_device_get_status_params_t { - pub phDevice: *mut ze_device_handle_t, -} -#[test] -fn bindgen_test_layout__ze_device_get_status_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_get_status_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_device_get_status_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_get_status_params_t>(), - 8usize, - concat!("Alignment of ", 
stringify!(_ze_device_get_status_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_get_status_params_t>())).phDevice as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_get_status_params_t), - "::", - stringify!(phDevice) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeDeviceGetStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_device_get_status_params_t = _ze_device_get_status_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeDeviceGetStatus"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnDeviceGetStatusCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_device_get_status_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Device callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_device_callbacks_t { - pub pfnGetCb: ze_pfnDeviceGetCb_t, - pub pfnGetSubDevicesCb: ze_pfnDeviceGetSubDevicesCb_t, - pub pfnGetPropertiesCb: ze_pfnDeviceGetPropertiesCb_t, - pub pfnGetComputePropertiesCb: ze_pfnDeviceGetComputePropertiesCb_t, - pub pfnGetModulePropertiesCb: ze_pfnDeviceGetModulePropertiesCb_t, - pub pfnGetCommandQueueGroupPropertiesCb: ze_pfnDeviceGetCommandQueueGroupPropertiesCb_t, - pub pfnGetMemoryPropertiesCb: ze_pfnDeviceGetMemoryPropertiesCb_t, - pub pfnGetMemoryAccessPropertiesCb: ze_pfnDeviceGetMemoryAccessPropertiesCb_t, - pub pfnGetCachePropertiesCb: ze_pfnDeviceGetCachePropertiesCb_t, - pub pfnGetImagePropertiesCb: ze_pfnDeviceGetImagePropertiesCb_t, - pub pfnGetExternalMemoryPropertiesCb: ze_pfnDeviceGetExternalMemoryPropertiesCb_t, - pub pfnGetP2PPropertiesCb: ze_pfnDeviceGetP2PPropertiesCb_t, - pub pfnCanAccessPeerCb: ze_pfnDeviceCanAccessPeerCb_t, - pub pfnGetStatusCb: ze_pfnDeviceGetStatusCb_t, -} -#[test] -fn bindgen_test_layout__ze_device_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_device_callbacks_t>(), - 112usize, - concat!("Size of: ", stringify!(_ze_device_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_device_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_device_callbacks_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetCb as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetSubDevicesCb as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetSubDevicesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetPropertiesCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetComputePropertiesCb as *const _ - as usize - }, - 
24usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetComputePropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetModulePropertiesCb as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetModulePropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetCommandQueueGroupPropertiesCb - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetCommandQueueGroupPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetMemoryPropertiesCb as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetMemoryPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetMemoryAccessPropertiesCb - as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetMemoryAccessPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetCachePropertiesCb as *const _ - as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetCachePropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetImagePropertiesCb as *const _ - as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetImagePropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetExternalMemoryPropertiesCb - as *const _ as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetExternalMemoryPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetP2PPropertiesCb as *const _ - as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetP2PPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnCanAccessPeerCb as *const _ - as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnCanAccessPeerCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_device_callbacks_t>())).pfnGetStatusCb as *const _ as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(_ze_device_callbacks_t), - "::", - stringify!(pfnGetStatusCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Device callback functions pointers"] -pub type ze_device_callbacks_t = _ze_device_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_create_params_t { - pub phDriver: *mut ze_driver_handle_t, - pub pdesc: *mut *const ze_context_desc_t, - pub pphContext: *mut *mut ze_context_handle_t, -} -#[test] -fn bindgen_test_layout__ze_context_create_params_t() { - assert_eq!( - 
::std::mem::size_of::<_ze_context_create_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_context_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_context_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_create_params_t>())).phDriver as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_create_params_t), - "::", - stringify!(phDriver) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_create_params_t>())).pdesc as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_create_params_t>())).pphContext as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_create_params_t), - "::", - stringify!(pphContext) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_create_params_t = _ze_context_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_destroy_params_t { - pub phContext: *mut ze_context_handle_t, -} -#[test] -fn bindgen_test_layout__ze_context_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_context_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_context_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_destroy_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_destroy_params_t), - "::", - stringify!(phContext) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_destroy_params_t = _ze_context_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] 
ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextGetStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_get_status_params_t { - pub phContext: *mut ze_context_handle_t, -} -#[test] -fn bindgen_test_layout__ze_context_get_status_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_get_status_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_context_get_status_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_get_status_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_context_get_status_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_get_status_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_get_status_params_t), - "::", - stringify!(phContext) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextGetStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_get_status_params_t = _ze_context_get_status_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextGetStatus"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextGetStatusCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_get_status_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextSystemBarrier"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_system_barrier_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, -} -#[test] -fn bindgen_test_layout__ze_context_system_barrier_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_system_barrier_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_context_system_barrier_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_system_barrier_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_context_system_barrier_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_system_barrier_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_system_barrier_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_system_barrier_params_t>())).phDevice as *const _ - as usize - }, 
- 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_system_barrier_params_t), - "::", - stringify!(phDevice) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextSystemBarrier"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_system_barrier_params_t = _ze_context_system_barrier_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextSystemBarrier"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextSystemBarrierCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_system_barrier_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextMakeMemoryResident"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_make_memory_resident_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pptr: *mut *mut ::std::os::raw::c_void, - pub psize: *mut usize, -} -#[test] -fn bindgen_test_layout__ze_context_make_memory_resident_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_make_memory_resident_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_context_make_memory_resident_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_make_memory_resident_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_context_make_memory_resident_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_memory_resident_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_memory_resident_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_memory_resident_params_t>())).phDevice - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_memory_resident_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_memory_resident_params_t>())).pptr as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_memory_resident_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_memory_resident_params_t>())).psize as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_memory_resident_params_t), - "::", - stringify!(psize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextMakeMemoryResident"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_make_memory_resident_params_t = _ze_context_make_memory_resident_params_t; -#[doc = ""] -#[doc = " 
@brief Callback function-pointer for zeContextMakeMemoryResident"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextMakeMemoryResidentCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_make_memory_resident_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextEvictMemory"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_evict_memory_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pptr: *mut *mut ::std::os::raw::c_void, - pub psize: *mut usize, -} -#[test] -fn bindgen_test_layout__ze_context_evict_memory_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_evict_memory_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_context_evict_memory_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_evict_memory_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_context_evict_memory_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_memory_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_memory_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_memory_params_t>())).phDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_memory_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_memory_params_t>())).pptr as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_memory_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_memory_params_t>())).psize as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_memory_params_t), - "::", - stringify!(psize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextEvictMemory"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_evict_memory_params_t = _ze_context_evict_memory_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextEvictMemory"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextEvictMemoryCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_evict_memory_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback 
function parameters for zeContextMakeImageResident"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_make_image_resident_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub phImage: *mut ze_image_handle_t, -} -#[test] -fn bindgen_test_layout__ze_context_make_image_resident_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_make_image_resident_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_context_make_image_resident_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_make_image_resident_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_context_make_image_resident_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_image_resident_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_image_resident_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_image_resident_params_t>())).phDevice - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_image_resident_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_make_image_resident_params_t>())).phImage as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_make_image_resident_params_t), - "::", - stringify!(phImage) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextMakeImageResident"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_make_image_resident_params_t = _ze_context_make_image_resident_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextMakeImageResident"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextMakeImageResidentCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_make_image_resident_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextEvictImage"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_context_evict_image_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub phImage: *mut ze_image_handle_t, -} -#[test] -fn bindgen_test_layout__ze_context_evict_image_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_evict_image_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_context_evict_image_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_evict_image_params_t>(), - 8usize, - concat!( - "Alignment of ", - 
stringify!(_ze_context_evict_image_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_image_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_image_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_image_params_t>())).phDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_image_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_evict_image_params_t>())).phImage as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_evict_image_params_t), - "::", - stringify!(phImage) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeContextEvictImage"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_context_evict_image_params_t = _ze_context_evict_image_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeContextEvictImage"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnContextEvictImageCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_context_evict_image_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Context callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_context_callbacks_t { - pub pfnCreateCb: ze_pfnContextCreateCb_t, - pub pfnDestroyCb: ze_pfnContextDestroyCb_t, - pub pfnGetStatusCb: ze_pfnContextGetStatusCb_t, - pub pfnSystemBarrierCb: ze_pfnContextSystemBarrierCb_t, - pub pfnMakeMemoryResidentCb: ze_pfnContextMakeMemoryResidentCb_t, - pub pfnEvictMemoryCb: ze_pfnContextEvictMemoryCb_t, - pub pfnMakeImageResidentCb: ze_pfnContextMakeImageResidentCb_t, - pub pfnEvictImageCb: ze_pfnContextEvictImageCb_t, -} -#[test] -fn bindgen_test_layout__ze_context_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_context_callbacks_t>(), - 64usize, - concat!("Size of: ", stringify!(_ze_context_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_context_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_context_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnGetStatusCb as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnGetStatusCb) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnSystemBarrierCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnSystemBarrierCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnMakeMemoryResidentCb as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnMakeMemoryResidentCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnEvictMemoryCb as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnEvictMemoryCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnMakeImageResidentCb as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnMakeImageResidentCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_context_callbacks_t>())).pfnEvictImageCb as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_context_callbacks_t), - "::", - stringify!(pfnEvictImageCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Context callback functions pointers"] -pub type ze_context_callbacks_t = _ze_context_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *const ze_command_queue_desc_t, - pub pphCommandQueue: *mut *mut ze_command_queue_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_queue_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_queue_create_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_command_queue_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_create_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_queue_create_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_create_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_create_params_t>())).phDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_create_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_create_params_t>())).pdesc as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_create_params_t>())).pphCommandQueue - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_create_params_t), - "::", - stringify!(pphCommandQueue) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueCreate"] -#[doc = " @details Each entry is a pointer to the 
parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_queue_create_params_t = _ze_command_queue_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandQueueCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandQueueCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_queue_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_destroy_params_t { - pub phCommandQueue: *mut ze_command_queue_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_queue_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_queue_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_command_queue_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_destroy_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_queue_destroy_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_destroy_params_t>())).phCommandQueue - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_destroy_params_t), - "::", - stringify!(phCommandQueue) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_queue_destroy_params_t = _ze_command_queue_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandQueueDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandQueueDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_queue_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueExecuteCommandLists"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_execute_command_lists_params_t { - pub phCommandQueue: *mut ze_command_queue_handle_t, - pub pnumCommandLists: *mut u32, - pub pphCommandLists: *mut *mut ze_command_list_handle_t, - pub phFence: *mut ze_fence_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_queue_execute_command_lists_params_t() { - assert_eq!( - 
::std::mem::size_of::<_ze_command_queue_execute_command_lists_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_command_queue_execute_command_lists_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_execute_command_lists_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_queue_execute_command_lists_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_execute_command_lists_params_t>())) - .phCommandQueue as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_execute_command_lists_params_t), - "::", - stringify!(phCommandQueue) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_execute_command_lists_params_t>())) - .pnumCommandLists as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_execute_command_lists_params_t), - "::", - stringify!(pnumCommandLists) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_execute_command_lists_params_t>())) - .pphCommandLists as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_execute_command_lists_params_t), - "::", - stringify!(pphCommandLists) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_execute_command_lists_params_t>())).phFence - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_execute_command_lists_params_t), - "::", - stringify!(phFence) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueExecuteCommandLists"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_queue_execute_command_lists_params_t = - _ze_command_queue_execute_command_lists_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandQueueExecuteCommandLists"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandQueueExecuteCommandListsCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_queue_execute_command_lists_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueSynchronize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_queue_synchronize_params_t { - pub phCommandQueue: *mut ze_command_queue_handle_t, - pub ptimeout: *mut u64, -} -#[test] -fn bindgen_test_layout__ze_command_queue_synchronize_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_queue_synchronize_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_command_queue_synchronize_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_synchronize_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_queue_synchronize_params_t) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_command_queue_synchronize_params_t>())).phCommandQueue - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_synchronize_params_t), - "::", - stringify!(phCommandQueue) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_synchronize_params_t>())).ptimeout as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_synchronize_params_t), - "::", - stringify!(ptimeout) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandQueueSynchronize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_queue_synchronize_params_t = _ze_command_queue_synchronize_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandQueueSynchronize"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandQueueSynchronizeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_queue_synchronize_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of CommandQueue callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_command_queue_callbacks_t { - pub pfnCreateCb: ze_pfnCommandQueueCreateCb_t, - pub pfnDestroyCb: ze_pfnCommandQueueDestroyCb_t, - pub pfnExecuteCommandListsCb: ze_pfnCommandQueueExecuteCommandListsCb_t, - pub pfnSynchronizeCb: ze_pfnCommandQueueSynchronizeCb_t, -} -#[test] -fn bindgen_test_layout__ze_command_queue_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_queue_callbacks_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_command_queue_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_queue_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_command_queue_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_callbacks_t>())).pfnCreateCb as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_callbacks_t>())).pfnDestroyCb as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_callbacks_t>())).pfnExecuteCommandListsCb - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_callbacks_t), - "::", - stringify!(pfnExecuteCommandListsCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_queue_callbacks_t>())).pfnSynchronizeCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_queue_callbacks_t), - "::", - stringify!(pfnSynchronizeCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of CommandQueue callback functions pointers"] -pub type ze_command_queue_callbacks_t = _ze_command_queue_callbacks_t; -#[doc = ""] -#[doc = 
" @brief Callback function parameters for zeCommandListCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *const ze_command_list_desc_t, - pub pphCommandList: *mut *mut ze_command_list_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_create_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_command_list_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_create_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_create_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_params_t>())).phDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_params_t>())).pdesc as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_params_t>())).pphCommandList as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_params_t), - "::", - stringify!(pphCommandList) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_create_params_t = _ze_command_list_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListCreateImmediate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_create_immediate_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub paltdesc: *mut *const ze_command_queue_desc_t, - pub pphCommandList: *mut *mut ze_command_list_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_create_immediate_params_t() { - 
assert_eq!( - ::std::mem::size_of::<_ze_command_list_create_immediate_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_create_immediate_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_create_immediate_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_create_immediate_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_immediate_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_immediate_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_immediate_params_t>())).phDevice - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_immediate_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_immediate_params_t>())).paltdesc - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_immediate_params_t), - "::", - stringify!(paltdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_create_immediate_params_t>())).pphCommandList - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_create_immediate_params_t), - "::", - stringify!(pphCommandList) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListCreateImmediate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_create_immediate_params_t = _ze_command_list_create_immediate_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListCreateImmediate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListCreateImmediateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_create_immediate_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_destroy_params_t { - pub phCommandList: *mut ze_command_list_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_command_list_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_destroy_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_destroy_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_destroy_params_t>())).phCommandList as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_destroy_params_t), - "::", - 
stringify!(phCommandList) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_destroy_params_t = _ze_command_list_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListClose"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_close_params_t { - pub phCommandList: *mut ze_command_list_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_close_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_close_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_command_list_close_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_close_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_command_list_close_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_close_params_t>())).phCommandList as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_close_params_t), - "::", - stringify!(phCommandList) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListClose"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_close_params_t = _ze_command_list_close_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListClose"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListCloseCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_close_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_reset_params_t { - pub phCommandList: *mut ze_command_list_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_reset_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_reset_params_t>(), - 8usize, - concat!("Size of: 
", stringify!(_ze_command_list_reset_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_reset_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_command_list_reset_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_reset_params_t>())).phCommandList as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_reset_params_t), - "::", - stringify!(phCommandList) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_reset_params_t = _ze_command_list_reset_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListReset"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListResetCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_reset_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendWriteGlobalTimestamp"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_write_global_timestamp_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pdstptr: *mut *mut u64, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_write_global_timestamp_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_write_global_timestamp_params_t>(), - 40usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_write_global_timestamp_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_write_global_timestamp_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_write_global_timestamp_params_t>())) - .pdstptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t), - "::", - stringify!(pdstptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_write_global_timestamp_params_t>())) - .phSignalEvent as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_command_list_append_write_global_timestamp_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_write_global_timestamp_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_write_global_timestamp_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendWriteGlobalTimestamp"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_write_global_timestamp_params_t = - _ze_command_list_append_write_global_timestamp_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendWriteGlobalTimestamp"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendWriteGlobalTimestampCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_write_global_timestamp_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendBarrier"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_barrier_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_barrier_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_barrier_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_barrier_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_barrier_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_barrier_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_barrier_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_barrier_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_barrier_params_t>())).phSignalEvent - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_barrier_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_barrier_params_t>())).pnumWaitEvents - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_barrier_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { 
- &(*(::std::ptr::null::<_ze_command_list_append_barrier_params_t>())).pphWaitEvents - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_barrier_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendBarrier"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_barrier_params_t = _ze_command_list_append_barrier_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendBarrier"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendBarrierCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_barrier_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryRangesBarrier"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_memory_ranges_barrier_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pnumRanges: *mut u32, - pub ppRangeSizes: *mut *const usize, - pub ppRanges: *mut *mut *const ::std::os::raw::c_void, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_memory_ranges_barrier_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_memory_ranges_barrier_params_t>(), - 56usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_memory_ranges_barrier_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .pnumRanges as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(pnumRanges) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .ppRangeSizes as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(ppRangeSizes) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .ppRanges as *const _ as usize - }, - 24usize, - 
concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(ppRanges) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .phSignalEvent as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_ranges_barrier_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_ranges_barrier_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryRangesBarrier"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_memory_ranges_barrier_params_t = - _ze_command_list_append_memory_ranges_barrier_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemoryRangesBarrier"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemoryRangesBarrierCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_memory_ranges_barrier_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryCopy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_memory_copy_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pdstptr: *mut *mut ::std::os::raw::c_void, - pub psrcptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_memory_copy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_memory_copy_params_t>(), - 56usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_memory_copy_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_memory_copy_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_memory_copy_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", 
- stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).pdstptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", - stringify!(pdstptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).psrcptr - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", - stringify!(psrcptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).psize - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).phSignalEvent - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).pnumWaitEvents - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_params_t>())).pphWaitEvents - as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryCopy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_memory_copy_params_t = _ze_command_list_append_memory_copy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemoryCopy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemoryCopyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_memory_copy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryFill"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_memory_fill_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pptr: *mut *mut ::std::os::raw::c_void, - pub ppattern: *mut *const ::std::os::raw::c_void, - pub ppattern_size: *mut usize, - pub psize: *mut usize, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_memory_fill_params_t() { - 
assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_memory_fill_params_t>(), - 64usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_memory_fill_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_memory_fill_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_memory_fill_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).pptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).ppattern - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(ppattern) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).ppattern_size - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(ppattern_size) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).psize - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).phSignalEvent - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).pnumWaitEvents - as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_fill_params_t>())).pphWaitEvents - as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_fill_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryFill"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_memory_fill_params_t = _ze_command_list_append_memory_fill_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemoryFill"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemoryFillCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_memory_fill_params_t, 
- result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryCopyRegion"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_memory_copy_region_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pdstptr: *mut *mut ::std::os::raw::c_void, - pub pdstRegion: *mut *const ze_copy_region_t, - pub pdstPitch: *mut u32, - pub pdstSlicePitch: *mut u32, - pub psrcptr: *mut *const ::std::os::raw::c_void, - pub psrcRegion: *mut *const ze_copy_region_t, - pub psrcPitch: *mut u32, - pub psrcSlicePitch: *mut u32, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_memory_copy_region_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_memory_copy_region_params_t>(), - 96usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_memory_copy_region_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_memory_copy_region_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())).pdstptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(pdstptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .pdstRegion as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(pdstRegion) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .pdstPitch as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(pdstPitch) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .pdstSlicePitch as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(pdstSlicePitch) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())).psrcptr - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(psrcptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .psrcRegion as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(psrcRegion) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .psrcPitch as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(psrcPitch) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .psrcSlicePitch as *const _ as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(psrcSlicePitch) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .phSignalEvent as *const _ as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_region_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_region_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryCopyRegion"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_memory_copy_region_params_t = - _ze_command_list_append_memory_copy_region_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemoryCopyRegion"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemoryCopyRegionCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_memory_copy_region_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryCopyFromContext"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_memory_copy_from_context_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pdstptr: *mut *mut ::std::os::raw::c_void, - pub phContextSrc: *mut ze_context_handle_t, - pub psrcptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_memory_copy_from_context_params_t() { - assert_eq!( - 
::std::mem::size_of::<_ze_command_list_append_memory_copy_from_context_params_t>(), - 64usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_memory_copy_from_context_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .pdstptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(pdstptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .phContextSrc as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(phContextSrc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .psrcptr as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(psrcptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .psize as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .phSignalEvent as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_copy_from_context_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_copy_from_context_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryCopyFromContext"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_memory_copy_from_context_params_t = - _ze_command_list_append_memory_copy_from_context_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemoryCopyFromContext"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] 
-#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemoryCopyFromContextCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_memory_copy_from_context_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_image_copy_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phDstImage: *mut ze_image_handle_t, - pub phSrcImage: *mut ze_image_handle_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_image_copy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_image_copy_params_t>(), - 48usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_image_copy_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_image_copy_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_image_copy_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_params_t>())).phDstImage - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_params_t), - "::", - stringify!(phDstImage) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_params_t>())).phSrcImage - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_params_t), - "::", - stringify!(phSrcImage) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_params_t>())).phSignalEvent - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_params_t>())).pnumWaitEvents - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_params_t>())).pphWaitEvents - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's 
value"] -pub type ze_command_list_append_image_copy_params_t = _ze_command_list_append_image_copy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendImageCopy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendImageCopyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_image_copy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopyRegion"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_image_copy_region_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phDstImage: *mut ze_image_handle_t, - pub phSrcImage: *mut ze_image_handle_t, - pub ppDstRegion: *mut *const ze_image_region_t, - pub ppSrcRegion: *mut *const ze_image_region_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_image_copy_region_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_image_copy_region_params_t>(), - 64usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_image_copy_region_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_image_copy_region_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_image_copy_region_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .phDstImage as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(phDstImage) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .phSrcImage as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(phSrcImage) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .ppDstRegion as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(ppDstRegion) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .ppSrcRegion as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(ppSrcRegion) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .phSignalEvent as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_region_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_region_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopyRegion"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_image_copy_region_params_t = - _ze_command_list_append_image_copy_region_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendImageCopyRegion"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendImageCopyRegionCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_image_copy_region_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopyToMemory"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_image_copy_to_memory_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pdstptr: *mut *mut ::std::os::raw::c_void, - pub phSrcImage: *mut ze_image_handle_t, - pub ppSrcRegion: *mut *const ze_image_region_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_image_copy_to_memory_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_image_copy_to_memory_params_t>(), - 56usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_image_copy_to_memory_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .pdstptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(pdstptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .phSrcImage as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(phSrcImage) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .ppSrcRegion as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(ppSrcRegion) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .phSignalEvent as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_to_memory_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_to_memory_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopyToMemory"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_image_copy_to_memory_params_t = - _ze_command_list_append_image_copy_to_memory_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendImageCopyToMemory"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendImageCopyToMemoryCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_image_copy_to_memory_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopyFromMemory"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_image_copy_from_memory_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phDstImage: *mut ze_image_handle_t, - pub psrcptr: *mut *const ::std::os::raw::c_void, - pub ppDstRegion: *mut *const ze_image_region_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub 
pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_image_copy_from_memory_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_image_copy_from_memory_params_t>(), - 56usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_image_copy_from_memory_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .phDstImage as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(phDstImage) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .psrcptr as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(psrcptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .ppDstRegion as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(ppDstRegion) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .phSignalEvent as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_image_copy_from_memory_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_image_copy_from_memory_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendImageCopyFromMemory"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_image_copy_from_memory_params_t = - _ze_command_list_append_image_copy_from_memory_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendImageCopyFromMemory"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type 
ze_pfnCommandListAppendImageCopyFromMemoryCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_image_copy_from_memory_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryPrefetch"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_memory_prefetch_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_memory_prefetch_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_memory_prefetch_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_memory_prefetch_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_memory_prefetch_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_memory_prefetch_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_prefetch_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_prefetch_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_prefetch_params_t>())).pptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_prefetch_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_memory_prefetch_params_t>())).psize - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_memory_prefetch_params_t), - "::", - stringify!(psize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemoryPrefetch"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_memory_prefetch_params_t = - _ze_command_list_append_memory_prefetch_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemoryPrefetch"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemoryPrefetchCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_memory_prefetch_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemAdvise"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct 
_ze_command_list_append_mem_advise_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub padvice: *mut ze_memory_advice_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_mem_advise_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_mem_advise_params_t>(), - 40usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_mem_advise_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_mem_advise_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_mem_advise_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_mem_advise_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_mem_advise_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_mem_advise_params_t>())).phDevice - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_mem_advise_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_mem_advise_params_t>())).pptr as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_mem_advise_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_mem_advise_params_t>())).psize - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_mem_advise_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_mem_advise_params_t>())).padvice - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_mem_advise_params_t), - "::", - stringify!(padvice) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendMemAdvise"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_mem_advise_params_t = _ze_command_list_append_mem_advise_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendMemAdvise"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendMemAdviseCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_mem_advise_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendSignalEvent"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_signal_event_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub 
phEvent: *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_signal_event_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_signal_event_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_signal_event_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_signal_event_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_signal_event_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_signal_event_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_signal_event_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_signal_event_params_t>())).phEvent - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_signal_event_params_t), - "::", - stringify!(phEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendSignalEvent"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_signal_event_params_t = - _ze_command_list_append_signal_event_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendSignalEvent"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendSignalEventCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_signal_event_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendWaitOnEvents"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_wait_on_events_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pnumEvents: *mut u32, - pub pphEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_wait_on_events_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_wait_on_events_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_wait_on_events_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_wait_on_events_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_wait_on_events_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_wait_on_events_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_wait_on_events_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_wait_on_events_params_t>())).pnumEvents - as *const _ as usize - }, - 8usize, - concat!( 
- "Offset of field: ", - stringify!(_ze_command_list_append_wait_on_events_params_t), - "::", - stringify!(pnumEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_wait_on_events_params_t>())).pphEvents - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_wait_on_events_params_t), - "::", - stringify!(pphEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendWaitOnEvents"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_wait_on_events_params_t = - _ze_command_list_append_wait_on_events_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendWaitOnEvents"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendWaitOnEventsCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_wait_on_events_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendEventReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_event_reset_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phEvent: *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_event_reset_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_event_reset_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_event_reset_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_event_reset_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_event_reset_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_event_reset_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_event_reset_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_event_reset_params_t>())).phEvent - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_event_reset_params_t), - "::", - stringify!(phEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendEventReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_event_reset_params_t = _ze_command_list_append_event_reset_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendEventReset"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] 
pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendEventResetCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_event_reset_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendQueryKernelTimestamps"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_query_kernel_timestamps_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pnumEvents: *mut u32, - pub pphEvents: *mut *mut ze_event_handle_t, - pub pdstptr: *mut *mut ::std::os::raw::c_void, - pub ppOffsets: *mut *const usize, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_query_kernel_timestamps_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_query_kernel_timestamps_params_t>(), - 64usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_query_kernel_timestamps_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .pnumEvents as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(pnumEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .pphEvents as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(pphEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .pdstptr as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(pdstptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .ppOffsets as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(ppOffsets) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .phSignalEvent as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_query_kernel_timestamps_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_query_kernel_timestamps_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendQueryKernelTimestamps"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_query_kernel_timestamps_params_t = - _ze_command_list_append_query_kernel_timestamps_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendQueryKernelTimestamps"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendQueryKernelTimestampsCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_query_kernel_timestamps_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchKernel"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_launch_kernel_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phKernel: *mut ze_kernel_handle_t, - pub ppLaunchFuncArgs: *mut *const ze_group_count_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_launch_kernel_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_launch_kernel_params_t>(), - 48usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_launch_kernel_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_launch_kernel_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_launch_kernel_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_params_t>())).phCommandList - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_params_t>())).phKernel - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_params_t>())) - .ppLaunchFuncArgs as 
*const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_params_t), - "::", - stringify!(ppLaunchFuncArgs) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_params_t>())).phSignalEvent - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_params_t>())).pphWaitEvents - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchKernel"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_launch_kernel_params_t = - _ze_command_list_append_launch_kernel_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendLaunchKernel"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendLaunchKernelCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_launch_kernel_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchCooperativeKernel"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_launch_cooperative_kernel_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phKernel: *mut ze_kernel_handle_t, - pub ppLaunchFuncArgs: *mut *const ze_group_count_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_launch_cooperative_kernel_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_launch_cooperative_kernel_params_t>(), - 48usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_launch_cooperative_kernel_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_cooperative_kernel_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_cooperative_kernel_params_t>())) - .phKernel as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_cooperative_kernel_params_t>())) - .ppLaunchFuncArgs as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t), - "::", - stringify!(ppLaunchFuncArgs) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_cooperative_kernel_params_t>())) - .phSignalEvent as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_cooperative_kernel_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_cooperative_kernel_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_cooperative_kernel_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchCooperativeKernel"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_launch_cooperative_kernel_params_t = - _ze_command_list_append_launch_cooperative_kernel_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendLaunchCooperativeKernel"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendLaunchCooperativeKernelCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_launch_cooperative_kernel_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchKernelIndirect"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_launch_kernel_indirect_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub phKernel: *mut ze_kernel_handle_t, - pub ppLaunchArgumentsBuffer: *mut *const ze_group_count_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn 
bindgen_test_layout__ze_command_list_append_launch_kernel_indirect_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_launch_kernel_indirect_params_t>(), - 48usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_launch_kernel_indirect_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_indirect_params_t>())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_indirect_params_t>())) - .phKernel as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_indirect_params_t>())) - .ppLaunchArgumentsBuffer as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t), - "::", - stringify!(ppLaunchArgumentsBuffer) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_indirect_params_t>())) - .phSignalEvent as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_indirect_params_t>())) - .pnumWaitEvents as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_append_launch_kernel_indirect_params_t>())) - .pphWaitEvents as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_kernel_indirect_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchKernelIndirect"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_launch_kernel_indirect_params_t = - _ze_command_list_append_launch_kernel_indirect_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendLaunchKernelIndirect"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendLaunchKernelIndirectCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_launch_kernel_indirect_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters 
for zeCommandListAppendLaunchMultipleKernelsIndirect"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_command_list_append_launch_multiple_kernels_indirect_params_t { - pub phCommandList: *mut ze_command_list_handle_t, - pub pnumKernels: *mut u32, - pub pphKernels: *mut *mut ze_kernel_handle_t, - pub ppCountBuffer: *mut *const u32, - pub ppLaunchArgumentsBuffer: *mut *const ze_group_count_t, - pub phSignalEvent: *mut ze_event_handle_t, - pub pnumWaitEvents: *mut u32, - pub pphWaitEvents: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_append_launch_multiple_kernels_indirect_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_append_launch_multiple_kernels_indirect_params_t>(), - 64usize, - concat!( - "Size of: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_append_launch_multiple_kernels_indirect_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .phCommandList as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(phCommandList) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .pnumKernels as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(pnumKernels) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .pphKernels as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(pphKernels) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .ppCountBuffer as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(ppCountBuffer) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .ppLaunchArgumentsBuffer as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(ppLaunchArgumentsBuffer) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .phSignalEvent as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(phSignalEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .pnumWaitEvents as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", 
- stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(pnumWaitEvents) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::< - _ze_command_list_append_launch_multiple_kernels_indirect_params_t, - >())) - .pphWaitEvents as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_append_launch_multiple_kernels_indirect_params_t), - "::", - stringify!(pphWaitEvents) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeCommandListAppendLaunchMultipleKernelsIndirect"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_command_list_append_launch_multiple_kernels_indirect_params_t = - _ze_command_list_append_launch_multiple_kernels_indirect_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeCommandListAppendLaunchMultipleKernelsIndirect"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnCommandListAppendLaunchMultipleKernelsIndirectCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_command_list_append_launch_multiple_kernels_indirect_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of CommandList callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_command_list_callbacks_t { - pub pfnCreateCb: ze_pfnCommandListCreateCb_t, - pub pfnCreateImmediateCb: ze_pfnCommandListCreateImmediateCb_t, - pub pfnDestroyCb: ze_pfnCommandListDestroyCb_t, - pub pfnCloseCb: ze_pfnCommandListCloseCb_t, - pub pfnResetCb: ze_pfnCommandListResetCb_t, - pub pfnAppendWriteGlobalTimestampCb: ze_pfnCommandListAppendWriteGlobalTimestampCb_t, - pub pfnAppendBarrierCb: ze_pfnCommandListAppendBarrierCb_t, - pub pfnAppendMemoryRangesBarrierCb: ze_pfnCommandListAppendMemoryRangesBarrierCb_t, - pub pfnAppendMemoryCopyCb: ze_pfnCommandListAppendMemoryCopyCb_t, - pub pfnAppendMemoryFillCb: ze_pfnCommandListAppendMemoryFillCb_t, - pub pfnAppendMemoryCopyRegionCb: ze_pfnCommandListAppendMemoryCopyRegionCb_t, - pub pfnAppendMemoryCopyFromContextCb: ze_pfnCommandListAppendMemoryCopyFromContextCb_t, - pub pfnAppendImageCopyCb: ze_pfnCommandListAppendImageCopyCb_t, - pub pfnAppendImageCopyRegionCb: ze_pfnCommandListAppendImageCopyRegionCb_t, - pub pfnAppendImageCopyToMemoryCb: ze_pfnCommandListAppendImageCopyToMemoryCb_t, - pub pfnAppendImageCopyFromMemoryCb: ze_pfnCommandListAppendImageCopyFromMemoryCb_t, - pub pfnAppendMemoryPrefetchCb: ze_pfnCommandListAppendMemoryPrefetchCb_t, - pub pfnAppendMemAdviseCb: ze_pfnCommandListAppendMemAdviseCb_t, - pub pfnAppendSignalEventCb: ze_pfnCommandListAppendSignalEventCb_t, - pub pfnAppendWaitOnEventsCb: ze_pfnCommandListAppendWaitOnEventsCb_t, - pub pfnAppendEventResetCb: ze_pfnCommandListAppendEventResetCb_t, - pub pfnAppendQueryKernelTimestampsCb: ze_pfnCommandListAppendQueryKernelTimestampsCb_t, - pub pfnAppendLaunchKernelCb: ze_pfnCommandListAppendLaunchKernelCb_t, - pub pfnAppendLaunchCooperativeKernelCb: ze_pfnCommandListAppendLaunchCooperativeKernelCb_t, - pub pfnAppendLaunchKernelIndirectCb: 
ze_pfnCommandListAppendLaunchKernelIndirectCb_t, - pub pfnAppendLaunchMultipleKernelsIndirectCb: - ze_pfnCommandListAppendLaunchMultipleKernelsIndirectCb_t, -} -#[test] -fn bindgen_test_layout__ze_command_list_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_command_list_callbacks_t>(), - 208usize, - concat!("Size of: ", stringify!(_ze_command_list_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_command_list_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_command_list_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnCreateCb as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnCreateImmediateCb - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnCreateImmediateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnDestroyCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnCloseCb as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnCloseCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnResetCb as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnResetCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendWriteGlobalTimestampCb - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendWriteGlobalTimestampCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendBarrierCb as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendBarrierCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendMemoryRangesBarrierCb - as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemoryRangesBarrierCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendMemoryCopyCb - as *const _ as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemoryCopyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendMemoryFillCb - as *const _ as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemoryFillCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendMemoryCopyRegionCb - as *const _ as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemoryCopyRegionCb) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_command_list_callbacks_t>())) - .pfnAppendMemoryCopyFromContextCb as *const _ as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemoryCopyFromContextCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendImageCopyCb - as *const _ as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendImageCopyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendImageCopyRegionCb - as *const _ as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendImageCopyRegionCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendImageCopyToMemoryCb - as *const _ as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendImageCopyToMemoryCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendImageCopyFromMemoryCb - as *const _ as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendImageCopyFromMemoryCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendMemoryPrefetchCb - as *const _ as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemoryPrefetchCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendMemAdviseCb - as *const _ as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendMemAdviseCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendSignalEventCb - as *const _ as usize - }, - 144usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendSignalEventCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendWaitOnEventsCb - as *const _ as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendWaitOnEventsCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendEventResetCb - as *const _ as usize - }, - 160usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendEventResetCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())) - .pfnAppendQueryKernelTimestampsCb as *const _ as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendQueryKernelTimestampsCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendLaunchKernelCb - as *const _ as usize - }, - 176usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendLaunchKernelCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())) - .pfnAppendLaunchCooperativeKernelCb as *const _ as usize - }, - 
184usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendLaunchCooperativeKernelCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())).pfnAppendLaunchKernelIndirectCb - as *const _ as usize - }, - 192usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendLaunchKernelIndirectCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_command_list_callbacks_t>())) - .pfnAppendLaunchMultipleKernelsIndirectCb as *const _ as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(_ze_command_list_callbacks_t), - "::", - stringify!(pfnAppendLaunchMultipleKernelsIndirectCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of CommandList callback functions pointers"] -pub type ze_command_list_callbacks_t = _ze_command_list_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_create_params_t { - pub phCommandQueue: *mut ze_command_queue_handle_t, - pub pdesc: *mut *const ze_fence_desc_t, - pub pphFence: *mut *mut ze_fence_handle_t, -} -#[test] -fn bindgen_test_layout__ze_fence_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_create_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_fence_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_fence_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_create_params_t>())).phCommandQueue as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_create_params_t), - "::", - stringify!(phCommandQueue) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_fence_create_params_t>())).pdesc as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_create_params_t>())).pphFence as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_create_params_t), - "::", - stringify!(pphFence) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_fence_create_params_t = _ze_fence_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeFenceCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnFenceCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_fence_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the 
function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_destroy_params_t { - pub phFence: *mut ze_fence_handle_t, -} -#[test] -fn bindgen_test_layout__ze_fence_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_fence_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_fence_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_destroy_params_t>())).phFence as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_destroy_params_t), - "::", - stringify!(phFence) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_fence_destroy_params_t = _ze_fence_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeFenceDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnFenceDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_fence_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceHostSynchronize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_host_synchronize_params_t { - pub phFence: *mut ze_fence_handle_t, - pub ptimeout: *mut u64, -} -#[test] -fn bindgen_test_layout__ze_fence_host_synchronize_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_host_synchronize_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_fence_host_synchronize_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_host_synchronize_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_fence_host_synchronize_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_host_synchronize_params_t>())).phFence as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_host_synchronize_params_t), - "::", - stringify!(phFence) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_host_synchronize_params_t>())).ptimeout as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_host_synchronize_params_t), - "::", - stringify!(ptimeout) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceHostSynchronize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_fence_host_synchronize_params_t = _ze_fence_host_synchronize_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeFenceHostSynchronize"] -#[doc = " @param[in] params 
Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnFenceHostSynchronizeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_fence_host_synchronize_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceQueryStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_query_status_params_t { - pub phFence: *mut ze_fence_handle_t, -} -#[test] -fn bindgen_test_layout__ze_fence_query_status_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_query_status_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_fence_query_status_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_query_status_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_fence_query_status_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_query_status_params_t>())).phFence as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_query_status_params_t), - "::", - stringify!(phFence) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceQueryStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_fence_query_status_params_t = _ze_fence_query_status_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeFenceQueryStatus"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnFenceQueryStatusCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_fence_query_status_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_fence_reset_params_t { - pub phFence: *mut ze_fence_handle_t, -} -#[test] -fn bindgen_test_layout__ze_fence_reset_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_reset_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_fence_reset_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_reset_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_fence_reset_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_reset_params_t>())).phFence as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_reset_params_t), - "::", - stringify!(phFence) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeFenceReset"] -#[doc = " @details Each entry is 
a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_fence_reset_params_t = _ze_fence_reset_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeFenceReset"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnFenceResetCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_fence_reset_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Fence callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_fence_callbacks_t { - pub pfnCreateCb: ze_pfnFenceCreateCb_t, - pub pfnDestroyCb: ze_pfnFenceDestroyCb_t, - pub pfnHostSynchronizeCb: ze_pfnFenceHostSynchronizeCb_t, - pub pfnQueryStatusCb: ze_pfnFenceQueryStatusCb_t, - pub pfnResetCb: ze_pfnFenceResetCb_t, -} -#[test] -fn bindgen_test_layout__ze_fence_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_fence_callbacks_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_fence_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_fence_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_fence_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_callbacks_t>())).pfnHostSynchronizeCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_callbacks_t), - "::", - stringify!(pfnHostSynchronizeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_callbacks_t>())).pfnQueryStatusCb as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_callbacks_t), - "::", - stringify!(pfnQueryStatusCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_fence_callbacks_t>())).pfnResetCb as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_fence_callbacks_t), - "::", - stringify!(pfnResetCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Fence callback functions pointers"] -pub type ze_fence_callbacks_t = _ze_fence_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub pdesc: *mut *const ze_event_pool_desc_t, - pub pnumDevices: *mut u32, - pub pphDevices: *mut *mut ze_device_handle_t, - pub pphEventPool: *mut *mut ze_event_pool_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_pool_create_params_t() { - assert_eq!( - 
::std::mem::size_of::<_ze_event_pool_create_params_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_event_pool_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_pool_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_create_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_create_params_t>())).pdesc as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_create_params_t>())).pnumDevices as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_create_params_t), - "::", - stringify!(pnumDevices) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_create_params_t>())).pphDevices as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_create_params_t), - "::", - stringify!(pphDevices) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_create_params_t>())).pphEventPool as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_create_params_t), - "::", - stringify!(pphEventPool) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_pool_create_params_t = _ze_event_pool_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventPoolCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventPoolCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_pool_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_destroy_params_t { - pub phEventPool: *mut ze_event_pool_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_pool_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_pool_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_event_pool_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_pool_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_destroy_params_t>())).phEventPool as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_destroy_params_t), - "::", - stringify!(phEventPool) - ) 
- ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_pool_destroy_params_t = _ze_event_pool_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventPoolDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventPoolDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_pool_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolGetIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_get_ipc_handle_params_t { - pub phEventPool: *mut ze_event_pool_handle_t, - pub pphIpc: *mut *mut ze_ipc_event_pool_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_pool_get_ipc_handle_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_pool_get_ipc_handle_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_event_pool_get_ipc_handle_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_get_ipc_handle_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_event_pool_get_ipc_handle_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_get_ipc_handle_params_t>())).phEventPool - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_get_ipc_handle_params_t), - "::", - stringify!(phEventPool) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_get_ipc_handle_params_t>())).pphIpc as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_get_ipc_handle_params_t), - "::", - stringify!(pphIpc) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolGetIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_pool_get_ipc_handle_params_t = _ze_event_pool_get_ipc_handle_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventPoolGetIpcHandle"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventPoolGetIpcHandleCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_pool_get_ipc_handle_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolOpenIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = 
" allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_open_ipc_handle_params_t { - pub phContext: *mut ze_context_handle_t, - pub phIpc: *mut ze_ipc_event_pool_handle_t, - pub pphEventPool: *mut *mut ze_event_pool_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_pool_open_ipc_handle_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_pool_open_ipc_handle_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_event_pool_open_ipc_handle_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_open_ipc_handle_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_event_pool_open_ipc_handle_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_open_ipc_handle_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_open_ipc_handle_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_open_ipc_handle_params_t>())).phIpc as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_open_ipc_handle_params_t), - "::", - stringify!(phIpc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_open_ipc_handle_params_t>())).pphEventPool - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_open_ipc_handle_params_t), - "::", - stringify!(pphEventPool) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolOpenIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_pool_open_ipc_handle_params_t = _ze_event_pool_open_ipc_handle_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventPoolOpenIpcHandle"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventPoolOpenIpcHandleCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_pool_open_ipc_handle_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolCloseIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_pool_close_ipc_handle_params_t { - pub phEventPool: *mut ze_event_pool_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_pool_close_ipc_handle_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_pool_close_ipc_handle_params_t>(), - 8usize, - concat!( - "Size of: ", - stringify!(_ze_event_pool_close_ipc_handle_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_close_ipc_handle_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_event_pool_close_ipc_handle_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_close_ipc_handle_params_t>())).phEventPool - as *const _ as usize - 
}, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_close_ipc_handle_params_t), - "::", - stringify!(phEventPool) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventPoolCloseIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_pool_close_ipc_handle_params_t = _ze_event_pool_close_ipc_handle_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventPoolCloseIpcHandle"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventPoolCloseIpcHandleCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_pool_close_ipc_handle_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of EventPool callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_event_pool_callbacks_t { - pub pfnCreateCb: ze_pfnEventPoolCreateCb_t, - pub pfnDestroyCb: ze_pfnEventPoolDestroyCb_t, - pub pfnGetIpcHandleCb: ze_pfnEventPoolGetIpcHandleCb_t, - pub pfnOpenIpcHandleCb: ze_pfnEventPoolOpenIpcHandleCb_t, - pub pfnCloseIpcHandleCb: ze_pfnEventPoolCloseIpcHandleCb_t, -} -#[test] -fn bindgen_test_layout__ze_event_pool_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_pool_callbacks_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_event_pool_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_pool_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_pool_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_callbacks_t>())).pfnGetIpcHandleCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_callbacks_t), - "::", - stringify!(pfnGetIpcHandleCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_callbacks_t>())).pfnOpenIpcHandleCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_callbacks_t), - "::", - stringify!(pfnOpenIpcHandleCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_pool_callbacks_t>())).pfnCloseIpcHandleCb as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_pool_callbacks_t), - "::", - stringify!(pfnCloseIpcHandleCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of EventPool callback functions pointers"] -pub type ze_event_pool_callbacks_t = _ze_event_pool_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to 
the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_create_params_t { - pub phEventPool: *mut ze_event_pool_handle_t, - pub pdesc: *mut *const ze_event_desc_t, - pub pphEvent: *mut *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_create_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_event_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_create_params_t>())).phEventPool as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_create_params_t), - "::", - stringify!(phEventPool) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_event_create_params_t>())).pdesc as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_create_params_t>())).pphEvent as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_create_params_t), - "::", - stringify!(pphEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_create_params_t = _ze_event_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_destroy_params_t { - pub phEvent: *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_event_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_destroy_params_t>())).phEvent as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_destroy_params_t), - "::", - stringify!(phEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type 
ze_event_destroy_params_t = _ze_event_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventHostSignal"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_host_signal_params_t { - pub phEvent: *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_host_signal_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_host_signal_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_event_host_signal_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_host_signal_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_host_signal_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_host_signal_params_t>())).phEvent as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_host_signal_params_t), - "::", - stringify!(phEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventHostSignal"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_host_signal_params_t = _ze_event_host_signal_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventHostSignal"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventHostSignalCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_host_signal_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventHostSynchronize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_host_synchronize_params_t { - pub phEvent: *mut ze_event_handle_t, - pub ptimeout: *mut u64, -} -#[test] -fn bindgen_test_layout__ze_event_host_synchronize_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_host_synchronize_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_event_host_synchronize_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_host_synchronize_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_event_host_synchronize_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_host_synchronize_params_t>())).phEvent as *const _ 
- as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_host_synchronize_params_t), - "::", - stringify!(phEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_host_synchronize_params_t>())).ptimeout as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_host_synchronize_params_t), - "::", - stringify!(ptimeout) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventHostSynchronize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_host_synchronize_params_t = _ze_event_host_synchronize_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventHostSynchronize"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventHostSynchronizeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_host_synchronize_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventQueryStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_query_status_params_t { - pub phEvent: *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_query_status_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_query_status_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_event_query_status_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_query_status_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_query_status_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_query_status_params_t>())).phEvent as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_query_status_params_t), - "::", - stringify!(phEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventQueryStatus"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_query_status_params_t = _ze_event_query_status_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventQueryStatus"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventQueryStatusCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_query_status_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventHostReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback 
the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_host_reset_params_t { - pub phEvent: *mut ze_event_handle_t, -} -#[test] -fn bindgen_test_layout__ze_event_host_reset_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_host_reset_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_event_host_reset_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_host_reset_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_host_reset_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_host_reset_params_t>())).phEvent as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_host_reset_params_t), - "::", - stringify!(phEvent) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventHostReset"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_host_reset_params_t = _ze_event_host_reset_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventHostReset"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventHostResetCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_host_reset_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventQueryKernelTimestamp"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_event_query_kernel_timestamp_params_t { - pub phEvent: *mut ze_event_handle_t, - pub pdstptr: *mut *mut ze_kernel_timestamp_result_t, -} -#[test] -fn bindgen_test_layout__ze_event_query_kernel_timestamp_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_query_kernel_timestamp_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_event_query_kernel_timestamp_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_query_kernel_timestamp_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_event_query_kernel_timestamp_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_query_kernel_timestamp_params_t>())).phEvent - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_query_kernel_timestamp_params_t), - "::", - stringify!(phEvent) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_query_kernel_timestamp_params_t>())).pdstptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_query_kernel_timestamp_params_t), - "::", - stringify!(pdstptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeEventQueryKernelTimestamp"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_event_query_kernel_timestamp_params_t = _ze_event_query_kernel_timestamp_params_t; 
-#[doc = ""] -#[doc = " @brief Callback function-pointer for zeEventQueryKernelTimestamp"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnEventQueryKernelTimestampCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_event_query_kernel_timestamp_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Event callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_event_callbacks_t { - pub pfnCreateCb: ze_pfnEventCreateCb_t, - pub pfnDestroyCb: ze_pfnEventDestroyCb_t, - pub pfnHostSignalCb: ze_pfnEventHostSignalCb_t, - pub pfnHostSynchronizeCb: ze_pfnEventHostSynchronizeCb_t, - pub pfnQueryStatusCb: ze_pfnEventQueryStatusCb_t, - pub pfnHostResetCb: ze_pfnEventHostResetCb_t, - pub pfnQueryKernelTimestampCb: ze_pfnEventQueryKernelTimestampCb_t, -} -#[test] -fn bindgen_test_layout__ze_event_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_event_callbacks_t>(), - 56usize, - concat!("Size of: ", stringify!(_ze_event_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_event_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_event_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnHostSignalCb as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnHostSignalCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnHostSynchronizeCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnHostSynchronizeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnQueryStatusCb as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnQueryStatusCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnHostResetCb as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnHostResetCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_event_callbacks_t>())).pfnQueryKernelTimestampCb as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_event_callbacks_t), - "::", - stringify!(pfnQueryKernelTimestampCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Event callback functions pointers"] -pub type ze_event_callbacks_t = _ze_event_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeImageGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter 
passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_get_properties_params_t { - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *const ze_image_desc_t, - pub ppImageProperties: *mut *mut ze_image_properties_t, -} -#[test] -fn bindgen_test_layout__ze_image_get_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_get_properties_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_image_get_properties_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_get_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_image_get_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_get_properties_params_t>())).phDevice as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_get_properties_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_get_properties_params_t>())).pdesc as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_get_properties_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_get_properties_params_t>())).ppImageProperties - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_get_properties_params_t), - "::", - stringify!(ppImageProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeImageGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_image_get_properties_params_t = _ze_image_get_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeImageGetProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnImageGetPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_image_get_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeImageCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *const ze_image_desc_t, - pub pphImage: *mut *mut ze_image_handle_t, -} -#[test] -fn bindgen_test_layout__ze_image_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_create_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_image_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_image_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_create_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_image_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_create_params_t>())).phDevice as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_create_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_image_create_params_t>())).pdesc as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_create_params_t>())).pphImage as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_create_params_t), - "::", - stringify!(pphImage) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeImageCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_image_create_params_t = _ze_image_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeImageCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnImageCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_image_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeImageDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_image_destroy_params_t { - pub phImage: *mut ze_image_handle_t, -} -#[test] -fn bindgen_test_layout__ze_image_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_image_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_image_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_destroy_params_t>())).phImage as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_destroy_params_t), - "::", - stringify!(phImage) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeImageDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_image_destroy_params_t = _ze_image_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeImageDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnImageDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_image_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - 
ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Image callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_image_callbacks_t { - pub pfnGetPropertiesCb: ze_pfnImageGetPropertiesCb_t, - pub pfnCreateCb: ze_pfnImageCreateCb_t, - pub pfnDestroyCb: ze_pfnImageDestroyCb_t, -} -#[test] -fn bindgen_test_layout__ze_image_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_image_callbacks_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_image_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_image_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_image_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_callbacks_t>())).pfnGetPropertiesCb as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_callbacks_t), - "::", - stringify!(pfnGetPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_image_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_image_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Image callback functions pointers"] -pub type ze_image_callbacks_t = _ze_image_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *const ze_module_desc_t, - pub pphModule: *mut *mut ze_module_handle_t, - pub pphBuildLog: *mut *mut ze_module_build_log_handle_t, -} -#[test] -fn bindgen_test_layout__ze_module_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_create_params_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_module_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_create_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_create_params_t>())).phDevice as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_create_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_create_params_t>())).pdesc as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_create_params_t>())).pphModule as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_create_params_t), - "::", - stringify!(pphModule) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_module_create_params_t>())).pphBuildLog as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_create_params_t), - "::", - stringify!(pphBuildLog) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_create_params_t = _ze_module_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_destroy_params_t { - pub phModule: *mut ze_module_handle_t, -} -#[test] -fn bindgen_test_layout__ze_module_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_module_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_destroy_params_t>())).phModule as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_destroy_params_t), - "::", - stringify!(phModule) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_destroy_params_t = _ze_module_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleDynamicLink"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_dynamic_link_params_t { - pub pnumModules: *mut u32, - pub pphModules: *mut *mut ze_module_handle_t, - pub pphLinkLog: *mut *mut ze_module_build_log_handle_t, -} -#[test] -fn 
bindgen_test_layout__ze_module_dynamic_link_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_dynamic_link_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_module_dynamic_link_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_dynamic_link_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_dynamic_link_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_dynamic_link_params_t>())).pnumModules as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_dynamic_link_params_t), - "::", - stringify!(pnumModules) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_dynamic_link_params_t>())).pphModules as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_dynamic_link_params_t), - "::", - stringify!(pphModules) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_dynamic_link_params_t>())).pphLinkLog as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_dynamic_link_params_t), - "::", - stringify!(pphLinkLog) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleDynamicLink"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_dynamic_link_params_t = _ze_module_dynamic_link_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleDynamicLink"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleDynamicLinkCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_dynamic_link_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetNativeBinary"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_get_native_binary_params_t { - pub phModule: *mut ze_module_handle_t, - pub ppSize: *mut *mut usize, - pub ppModuleNativeBinary: *mut *mut u8, -} -#[test] -fn bindgen_test_layout__ze_module_get_native_binary_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_get_native_binary_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_module_get_native_binary_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_get_native_binary_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_get_native_binary_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_native_binary_params_t>())).phModule as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_native_binary_params_t), - "::", - stringify!(phModule) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_native_binary_params_t>())).ppSize as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_native_binary_params_t), - "::", - stringify!(ppSize) - ) 
- ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_native_binary_params_t>())).ppModuleNativeBinary - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_native_binary_params_t), - "::", - stringify!(ppModuleNativeBinary) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetNativeBinary"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_get_native_binary_params_t = _ze_module_get_native_binary_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleGetNativeBinary"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleGetNativeBinaryCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_get_native_binary_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetGlobalPointer"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_get_global_pointer_params_t { - pub phModule: *mut ze_module_handle_t, - pub ppGlobalName: *mut *const ::std::os::raw::c_char, - pub ppSize: *mut *mut usize, - pub ppptr: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_module_get_global_pointer_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_get_global_pointer_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_module_get_global_pointer_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_get_global_pointer_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_get_global_pointer_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_global_pointer_params_t>())).phModule as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_global_pointer_params_t), - "::", - stringify!(phModule) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_global_pointer_params_t>())).ppGlobalName - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_global_pointer_params_t), - "::", - stringify!(ppGlobalName) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_global_pointer_params_t>())).ppSize as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_global_pointer_params_t), - "::", - stringify!(ppSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_global_pointer_params_t>())).ppptr as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_global_pointer_params_t), - "::", - stringify!(ppptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetGlobalPointer"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability 
to modify the parameter's value"] -pub type ze_module_get_global_pointer_params_t = _ze_module_get_global_pointer_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleGetGlobalPointer"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleGetGlobalPointerCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_get_global_pointer_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetKernelNames"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_get_kernel_names_params_t { - pub phModule: *mut ze_module_handle_t, - pub ppCount: *mut *mut u32, - pub ppNames: *mut *mut *const ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout__ze_module_get_kernel_names_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_get_kernel_names_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_module_get_kernel_names_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_get_kernel_names_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_get_kernel_names_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_kernel_names_params_t>())).phModule as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_kernel_names_params_t), - "::", - stringify!(phModule) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_kernel_names_params_t>())).ppCount as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_kernel_names_params_t), - "::", - stringify!(ppCount) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_kernel_names_params_t>())).ppNames as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_kernel_names_params_t), - "::", - stringify!(ppNames) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetKernelNames"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_get_kernel_names_params_t = _ze_module_get_kernel_names_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleGetKernelNames"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleGetKernelNamesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_get_kernel_names_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetProperties"] -#[doc = " @details Each entry is 
a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_get_properties_params_t { - pub phModule: *mut ze_module_handle_t, - pub ppModuleProperties: *mut *mut ze_module_properties_t, -} -#[test] -fn bindgen_test_layout__ze_module_get_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_get_properties_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_module_get_properties_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_get_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_get_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_properties_params_t>())).phModule as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_properties_params_t), - "::", - stringify!(phModule) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_properties_params_t>())).ppModuleProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_properties_params_t), - "::", - stringify!(ppModuleProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_get_properties_params_t = _ze_module_get_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleGetProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleGetPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_get_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetFunctionPointer"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_get_function_pointer_params_t { - pub phModule: *mut ze_module_handle_t, - pub ppFunctionName: *mut *const ::std::os::raw::c_char, - pub ppfnFunction: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_module_get_function_pointer_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_get_function_pointer_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_module_get_function_pointer_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_get_function_pointer_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_get_function_pointer_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_function_pointer_params_t>())).phModule - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_function_pointer_params_t), - "::", - stringify!(phModule) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_module_get_function_pointer_params_t>())).ppFunctionName - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_function_pointer_params_t), - "::", - stringify!(ppFunctionName) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_get_function_pointer_params_t>())).ppfnFunction - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_get_function_pointer_params_t), - "::", - stringify!(ppfnFunction) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleGetFunctionPointer"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_get_function_pointer_params_t = _ze_module_get_function_pointer_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleGetFunctionPointer"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleGetFunctionPointerCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_get_function_pointer_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Module callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_module_callbacks_t { - pub pfnCreateCb: ze_pfnModuleCreateCb_t, - pub pfnDestroyCb: ze_pfnModuleDestroyCb_t, - pub pfnDynamicLinkCb: ze_pfnModuleDynamicLinkCb_t, - pub pfnGetNativeBinaryCb: ze_pfnModuleGetNativeBinaryCb_t, - pub pfnGetGlobalPointerCb: ze_pfnModuleGetGlobalPointerCb_t, - pub pfnGetKernelNamesCb: ze_pfnModuleGetKernelNamesCb_t, - pub pfnGetPropertiesCb: ze_pfnModuleGetPropertiesCb_t, - pub pfnGetFunctionPointerCb: ze_pfnModuleGetFunctionPointerCb_t, -} -#[test] -fn bindgen_test_layout__ze_module_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_callbacks_t>(), - 64usize, - concat!("Size of: ", stringify!(_ze_module_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_module_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnDynamicLinkCb as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnDynamicLinkCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnGetNativeBinaryCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnGetNativeBinaryCb) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnGetGlobalPointerCb as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnGetGlobalPointerCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnGetKernelNamesCb as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnGetKernelNamesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnGetPropertiesCb as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnGetPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_callbacks_t>())).pfnGetFunctionPointerCb as *const _ - as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_callbacks_t), - "::", - stringify!(pfnGetFunctionPointerCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Module callback functions pointers"] -pub type ze_module_callbacks_t = _ze_module_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleBuildLogDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_module_build_log_destroy_params_t { - pub phModuleBuildLog: *mut ze_module_build_log_handle_t, -} -#[test] -fn bindgen_test_layout__ze_module_build_log_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_build_log_destroy_params_t>(), - 8usize, - concat!( - "Size of: ", - stringify!(_ze_module_build_log_destroy_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_build_log_destroy_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_build_log_destroy_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_build_log_destroy_params_t>())).phModuleBuildLog - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_build_log_destroy_params_t), - "::", - stringify!(phModuleBuildLog) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleBuildLogDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_build_log_destroy_params_t = _ze_module_build_log_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleBuildLogDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleBuildLogDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_build_log_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleBuildLogGetString"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] 
-#[derive(Debug, Copy, Clone)] -pub struct _ze_module_build_log_get_string_params_t { - pub phModuleBuildLog: *mut ze_module_build_log_handle_t, - pub ppSize: *mut *mut usize, - pub ppBuildLog: *mut *mut ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout__ze_module_build_log_get_string_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_build_log_get_string_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_module_build_log_get_string_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_build_log_get_string_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_build_log_get_string_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_build_log_get_string_params_t>())).phModuleBuildLog - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_build_log_get_string_params_t), - "::", - stringify!(phModuleBuildLog) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_build_log_get_string_params_t>())).ppSize as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_build_log_get_string_params_t), - "::", - stringify!(ppSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_build_log_get_string_params_t>())).ppBuildLog - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_build_log_get_string_params_t), - "::", - stringify!(ppBuildLog) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeModuleBuildLogGetString"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_module_build_log_get_string_params_t = _ze_module_build_log_get_string_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeModuleBuildLogGetString"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnModuleBuildLogGetStringCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_module_build_log_get_string_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of ModuleBuildLog callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_module_build_log_callbacks_t { - pub pfnDestroyCb: ze_pfnModuleBuildLogDestroyCb_t, - pub pfnGetStringCb: ze_pfnModuleBuildLogGetStringCb_t, -} -#[test] -fn bindgen_test_layout__ze_module_build_log_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_module_build_log_callbacks_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_module_build_log_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_module_build_log_callbacks_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_module_build_log_callbacks_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_module_build_log_callbacks_t>())).pfnDestroyCb as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_build_log_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_module_build_log_callbacks_t>())).pfnGetStringCb as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_module_build_log_callbacks_t), - "::", - stringify!(pfnGetStringCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of ModuleBuildLog callback functions pointers"] -pub type ze_module_build_log_callbacks_t = _ze_module_build_log_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_create_params_t { - pub phModule: *mut ze_module_handle_t, - pub pdesc: *mut *const ze_kernel_desc_t, - pub pphKernel: *mut *mut ze_kernel_handle_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_create_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_kernel_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_create_params_t>())).phModule as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_create_params_t), - "::", - stringify!(phModule) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_create_params_t>())).pdesc as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_create_params_t>())).pphKernel as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_create_params_t), - "::", - stringify!(pphKernel) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_create_params_t = _ze_kernel_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_destroy_params_t { - pub phKernel: *mut ze_kernel_handle_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_kernel_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_destroy_params_t>(), - 8usize, 
- concat!("Alignment of ", stringify!(_ze_kernel_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_destroy_params_t>())).phKernel as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_destroy_params_t), - "::", - stringify!(phKernel) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_destroy_params_t = _ze_kernel_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetCacheConfig"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_set_cache_config_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub pflags: *mut ze_cache_config_flags_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_set_cache_config_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_set_cache_config_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_set_cache_config_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_set_cache_config_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_set_cache_config_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_cache_config_params_t>())).phKernel as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_cache_config_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_cache_config_params_t>())).pflags as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_cache_config_params_t), - "::", - stringify!(pflags) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetCacheConfig"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_set_cache_config_params_t = _ze_kernel_set_cache_config_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelSetCacheConfig"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelSetCacheConfigCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_set_cache_config_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - 
ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetGroupSize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_set_group_size_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub pgroupSizeX: *mut u32, - pub pgroupSizeY: *mut u32, - pub pgroupSizeZ: *mut u32, -} -#[test] -fn bindgen_test_layout__ze_kernel_set_group_size_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_set_group_size_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_kernel_set_group_size_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_set_group_size_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_set_group_size_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_group_size_params_t>())).phKernel as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_group_size_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_group_size_params_t>())).pgroupSizeX as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_group_size_params_t), - "::", - stringify!(pgroupSizeX) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_group_size_params_t>())).pgroupSizeY as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_group_size_params_t), - "::", - stringify!(pgroupSizeY) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_group_size_params_t>())).pgroupSizeZ as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_group_size_params_t), - "::", - stringify!(pgroupSizeZ) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetGroupSize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_set_group_size_params_t = _ze_kernel_set_group_size_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelSetGroupSize"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelSetGroupSizeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_set_group_size_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSuggestGroupSize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_suggest_group_size_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub pglobalSizeX: *mut u32, - pub pglobalSizeY: *mut u32, - pub pglobalSizeZ: *mut u32, - pub pgroupSizeX: *mut *mut u32, - pub pgroupSizeY: *mut *mut u32, - pub pgroupSizeZ: 
*mut *mut u32, -} -#[test] -fn bindgen_test_layout__ze_kernel_suggest_group_size_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_suggest_group_size_params_t>(), - 56usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_suggest_group_size_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_suggest_group_size_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_suggest_group_size_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).phKernel as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).pglobalSizeX - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(pglobalSizeX) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).pglobalSizeY - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(pglobalSizeY) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).pglobalSizeZ - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(pglobalSizeZ) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).pgroupSizeX - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(pgroupSizeX) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).pgroupSizeY - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(pgroupSizeY) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_group_size_params_t>())).pgroupSizeZ - as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_group_size_params_t), - "::", - stringify!(pgroupSizeZ) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSuggestGroupSize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_suggest_group_size_params_t = _ze_kernel_suggest_group_size_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelSuggestGroupSize"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelSuggestGroupSizeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_suggest_group_size_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSuggestMaxCooperativeGroupCount"] -#[doc = " @details Each entry is a pointer to the parameter passed to the 
function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_suggest_max_cooperative_group_count_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub ptotalGroupCount: *mut *mut u32, -} -#[test] -fn bindgen_test_layout__ze_kernel_suggest_max_cooperative_group_count_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_suggest_max_cooperative_group_count_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_suggest_max_cooperative_group_count_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_suggest_max_cooperative_group_count_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_suggest_max_cooperative_group_count_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_max_cooperative_group_count_params_t>())) - .phKernel as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_max_cooperative_group_count_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_suggest_max_cooperative_group_count_params_t>())) - .ptotalGroupCount as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_suggest_max_cooperative_group_count_params_t), - "::", - stringify!(ptotalGroupCount) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSuggestMaxCooperativeGroupCount"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_suggest_max_cooperative_group_count_params_t = - _ze_kernel_suggest_max_cooperative_group_count_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelSuggestMaxCooperativeGroupCount"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelSuggestMaxCooperativeGroupCountCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_suggest_max_cooperative_group_count_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetArgumentValue"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_set_argument_value_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub pargIndex: *mut u32, - pub pargSize: *mut usize, - pub ppArgValue: *mut *const ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_kernel_set_argument_value_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_set_argument_value_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_set_argument_value_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_set_argument_value_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_set_argument_value_params_t) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_kernel_set_argument_value_params_t>())).phKernel as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_argument_value_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_argument_value_params_t>())).pargIndex as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_argument_value_params_t), - "::", - stringify!(pargIndex) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_argument_value_params_t>())).pargSize as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_argument_value_params_t), - "::", - stringify!(pargSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_argument_value_params_t>())).ppArgValue - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_argument_value_params_t), - "::", - stringify!(ppArgValue) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetArgumentValue"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_set_argument_value_params_t = _ze_kernel_set_argument_value_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelSetArgumentValue"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelSetArgumentValueCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_set_argument_value_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelSetIndirectAccess"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_set_indirect_access_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub pflags: *mut ze_kernel_indirect_access_flags_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_set_indirect_access_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_set_indirect_access_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_set_indirect_access_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_set_indirect_access_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_set_indirect_access_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_indirect_access_params_t>())).phKernel as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_indirect_access_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_set_indirect_access_params_t>())).pflags as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_set_indirect_access_params_t), - "::", - stringify!(pflags) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for 
zeKernelSetIndirectAccess"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_set_indirect_access_params_t = _ze_kernel_set_indirect_access_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelSetIndirectAccess"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelSetIndirectAccessCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_set_indirect_access_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetIndirectAccess"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_get_indirect_access_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub ppFlags: *mut *mut ze_kernel_indirect_access_flags_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_get_indirect_access_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_get_indirect_access_params_t>(), - 16usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_get_indirect_access_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_get_indirect_access_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_get_indirect_access_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_indirect_access_params_t>())).phKernel as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_indirect_access_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_indirect_access_params_t>())).ppFlags as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_indirect_access_params_t), - "::", - stringify!(ppFlags) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetIndirectAccess"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_get_indirect_access_params_t = _ze_kernel_get_indirect_access_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelGetIndirectAccess"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelGetIndirectAccessCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_get_indirect_access_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetSourceAttributes"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " 
allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_get_source_attributes_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub ppSize: *mut *mut u32, - pub ppString: *mut *mut *mut ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout__ze_kernel_get_source_attributes_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_get_source_attributes_params_t>(), - 24usize, - concat!( - "Size of: ", - stringify!(_ze_kernel_get_source_attributes_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_get_source_attributes_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_get_source_attributes_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_source_attributes_params_t>())).phKernel - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_source_attributes_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_source_attributes_params_t>())).ppSize as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_source_attributes_params_t), - "::", - stringify!(ppSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_source_attributes_params_t>())).ppString - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_source_attributes_params_t), - "::", - stringify!(ppString) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetSourceAttributes"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_get_source_attributes_params_t = _ze_kernel_get_source_attributes_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelGetSourceAttributes"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelGetSourceAttributesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_get_source_attributes_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_get_properties_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub ppKernelProperties: *mut *mut ze_kernel_properties_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_get_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_get_properties_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_kernel_get_properties_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_get_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_kernel_get_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_properties_params_t>())).phKernel as *const _ - as 
usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_properties_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_properties_params_t>())).ppKernelProperties - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_properties_params_t), - "::", - stringify!(ppKernelProperties) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_get_properties_params_t = _ze_kernel_get_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelGetProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelGetPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_get_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetName"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_kernel_get_name_params_t { - pub phKernel: *mut ze_kernel_handle_t, - pub ppSize: *mut *mut usize, - pub ppName: *mut *mut ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout__ze_kernel_get_name_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_get_name_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_kernel_get_name_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_get_name_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_get_name_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_name_params_t>())).phKernel as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_name_params_t), - "::", - stringify!(phKernel) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_name_params_t>())).ppSize as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_name_params_t), - "::", - stringify!(ppSize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_get_name_params_t>())).ppName as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_get_name_params_t), - "::", - stringify!(ppName) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeKernelGetName"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_kernel_get_name_params_t = _ze_kernel_get_name_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeKernelGetName"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] 
ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnKernelGetNameCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_kernel_get_name_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Kernel callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_kernel_callbacks_t { - pub pfnCreateCb: ze_pfnKernelCreateCb_t, - pub pfnDestroyCb: ze_pfnKernelDestroyCb_t, - pub pfnSetCacheConfigCb: ze_pfnKernelSetCacheConfigCb_t, - pub pfnSetGroupSizeCb: ze_pfnKernelSetGroupSizeCb_t, - pub pfnSuggestGroupSizeCb: ze_pfnKernelSuggestGroupSizeCb_t, - pub pfnSuggestMaxCooperativeGroupCountCb: ze_pfnKernelSuggestMaxCooperativeGroupCountCb_t, - pub pfnSetArgumentValueCb: ze_pfnKernelSetArgumentValueCb_t, - pub pfnSetIndirectAccessCb: ze_pfnKernelSetIndirectAccessCb_t, - pub pfnGetIndirectAccessCb: ze_pfnKernelGetIndirectAccessCb_t, - pub pfnGetSourceAttributesCb: ze_pfnKernelGetSourceAttributesCb_t, - pub pfnGetPropertiesCb: ze_pfnKernelGetPropertiesCb_t, - pub pfnGetNameCb: ze_pfnKernelGetNameCb_t, -} -#[test] -fn bindgen_test_layout__ze_kernel_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_kernel_callbacks_t>(), - 96usize, - concat!("Size of: ", stringify!(_ze_kernel_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_kernel_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_kernel_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnSetCacheConfigCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnSetCacheConfigCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnSetGroupSizeCb as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnSetGroupSizeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnSuggestGroupSizeCb as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnSuggestGroupSizeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnSuggestMaxCooperativeGroupCountCb - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnSuggestMaxCooperativeGroupCountCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnSetArgumentValueCb as *const _ - as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnSetArgumentValueCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnSetIndirectAccessCb as *const _ - as usize - }, - 56usize, - concat!( - "Offset of 
field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnSetIndirectAccessCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnGetIndirectAccessCb as *const _ - as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnGetIndirectAccessCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnGetSourceAttributesCb as *const _ - as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnGetSourceAttributesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnGetPropertiesCb as *const _ - as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnGetPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_kernel_callbacks_t>())).pfnGetNameCb as *const _ as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(_ze_kernel_callbacks_t), - "::", - stringify!(pfnGetNameCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Kernel callback functions pointers"] -pub type ze_kernel_callbacks_t = _ze_kernel_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeSamplerCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_sampler_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *const ze_sampler_desc_t, - pub pphSampler: *mut *mut ze_sampler_handle_t, -} -#[test] -fn bindgen_test_layout__ze_sampler_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_sampler_create_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_sampler_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_sampler_create_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_sampler_create_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_create_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_create_params_t>())).phDevice as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_create_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_create_params_t>())).pdesc as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_create_params_t>())).pphSampler as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_create_params_t), - "::", - stringify!(pphSampler) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeSamplerCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_sampler_create_params_t = _ze_sampler_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeSamplerCreate"] -#[doc = " 
@param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnSamplerCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_sampler_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeSamplerDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_sampler_destroy_params_t { - pub phSampler: *mut ze_sampler_handle_t, -} -#[test] -fn bindgen_test_layout__ze_sampler_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_sampler_destroy_params_t>(), - 8usize, - concat!("Size of: ", stringify!(_ze_sampler_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_sampler_destroy_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_sampler_destroy_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_destroy_params_t>())).phSampler as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_destroy_params_t), - "::", - stringify!(phSampler) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeSamplerDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_sampler_destroy_params_t = _ze_sampler_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeSamplerDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnSamplerDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_sampler_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Sampler callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_sampler_callbacks_t { - pub pfnCreateCb: ze_pfnSamplerCreateCb_t, - pub pfnDestroyCb: ze_pfnSamplerDestroyCb_t, -} -#[test] -fn bindgen_test_layout__ze_sampler_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_sampler_callbacks_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_sampler_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_sampler_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_sampler_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_callbacks_t>())).pfnCreateCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_sampler_callbacks_t>())).pfnDestroyCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_sampler_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); -} 
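
(Editorial illustration only; not part of the patch.) The Level Zero bindings being deleted in this hunk all follow the same tracing pattern: for every zeXxx entry point, bindgen emits a _ze_*_params_t struct whose fields are pointers to that call's arguments, a ze_pfn*Cb_t callback type that receives those params together with the call's ze_result_t and two user-data slots, and a per-object *_callbacks_t table that groups the callbacks. As a minimal sketch of how those pieces fit together, assuming the deleted types (ze_kernel_create_params_t, ze_result_t, ze_module_handle_t, ze_kernel_callbacks_t) are in scope and with a purely hypothetical callback body, a tracer hook for zeKernelCreate could look like this:

    use std::os::raw::c_void;

    // Hypothetical tracer callback; its signature matches the deleted
    // ze_pfnKernelCreateCb_t type (an Option<unsafe extern "C" fn(..)>).
    unsafe extern "C" fn on_kernel_create(
        params: *mut ze_kernel_create_params_t,
        _result: ze_result_t,
        _tracer_user_data: *mut c_void,
        _instance_user_data: *mut *mut c_void,
    ) {
        // Every field of the params struct is a pointer to the original
        // argument, so the callback can inspect or rewrite it in place.
        // Handles are raw pointers in these bindings, hence {:p}.
        let module: ze_module_handle_t = *(*params).phModule;
        eprintln!("zeKernelCreate on module {:p}", module);
    }

    // The callback table is plain old data and derives Default, so any
    // entries left unset simply stay None.
    fn kernel_tracer_callbacks() -> ze_kernel_callbacks_t {
        ze_kernel_callbacks_t {
            pfnCreateCb: Some(on_kernel_create),
            ..Default::default()
        }
    }

The same shape repeats for every removed params/callback/table trio in this hunk; only the pointed-to argument types change (for example, the zeMemAlloc* params carry an extra level of indirection in ppptr, pointing at the caller's out-pointer).
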
-#[doc = ""] -#[doc = " @brief Table of Sampler callback functions pointers"] -pub type ze_sampler_callbacks_t = _ze_sampler_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zePhysicalMemCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_physical_mem_create_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub pdesc: *mut *mut ze_physical_mem_desc_t, - pub pphPhysicalMemory: *mut *mut ze_physical_mem_handle_t, -} -#[test] -fn bindgen_test_layout__ze_physical_mem_create_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_physical_mem_create_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_physical_mem_create_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_physical_mem_create_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_physical_mem_create_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_create_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_create_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_create_params_t>())).phDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_create_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_create_params_t>())).pdesc as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_create_params_t), - "::", - stringify!(pdesc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_create_params_t>())).pphPhysicalMemory - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_create_params_t), - "::", - stringify!(pphPhysicalMemory) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zePhysicalMemCreate"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_physical_mem_create_params_t = _ze_physical_mem_create_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zePhysicalMemCreate"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnPhysicalMemCreateCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_physical_mem_create_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zePhysicalMemDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_physical_mem_destroy_params_t { - pub phContext: *mut ze_context_handle_t, - pub phPhysicalMemory: *mut ze_physical_mem_handle_t, -} -#[test] -fn 
bindgen_test_layout__ze_physical_mem_destroy_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_physical_mem_destroy_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_physical_mem_destroy_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_physical_mem_destroy_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_physical_mem_destroy_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_destroy_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_destroy_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_destroy_params_t>())).phPhysicalMemory - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_destroy_params_t), - "::", - stringify!(phPhysicalMemory) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zePhysicalMemDestroy"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_physical_mem_destroy_params_t = _ze_physical_mem_destroy_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zePhysicalMemDestroy"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnPhysicalMemDestroyCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_physical_mem_destroy_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of PhysicalMem callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_physical_mem_callbacks_t { - pub pfnCreateCb: ze_pfnPhysicalMemCreateCb_t, - pub pfnDestroyCb: ze_pfnPhysicalMemDestroyCb_t, -} -#[test] -fn bindgen_test_layout__ze_physical_mem_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_physical_mem_callbacks_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_physical_mem_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_physical_mem_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_physical_mem_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_callbacks_t>())).pfnCreateCb as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_callbacks_t), - "::", - stringify!(pfnCreateCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_physical_mem_callbacks_t>())).pfnDestroyCb as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_physical_mem_callbacks_t), - "::", - stringify!(pfnDestroyCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of PhysicalMem callback functions pointers"] -pub type ze_physical_mem_callbacks_t = _ze_physical_mem_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemAllocShared"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_alloc_shared_params_t { - pub 
phContext: *mut ze_context_handle_t, - pub pdevice_desc: *mut *const ze_device_mem_alloc_desc_t, - pub phost_desc: *mut *const ze_host_mem_alloc_desc_t, - pub psize: *mut usize, - pub palignment: *mut usize, - pub phDevice: *mut ze_device_handle_t, - pub ppptr: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_mem_alloc_shared_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_alloc_shared_params_t>(), - 56usize, - concat!("Size of: ", stringify!(_ze_mem_alloc_shared_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_alloc_shared_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_mem_alloc_shared_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).pdevice_desc as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(pdevice_desc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).phost_desc as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(phost_desc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).psize as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).palignment as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(palignment) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).phDevice as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_shared_params_t>())).ppptr as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_shared_params_t), - "::", - stringify!(ppptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemAllocShared"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_alloc_shared_params_t = _ze_mem_alloc_shared_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemAllocShared"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemAllocSharedCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_alloc_shared_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemAllocDevice"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc 
= " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_alloc_device_params_t { - pub phContext: *mut ze_context_handle_t, - pub pdevice_desc: *mut *const ze_device_mem_alloc_desc_t, - pub psize: *mut usize, - pub palignment: *mut usize, - pub phDevice: *mut ze_device_handle_t, - pub ppptr: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_mem_alloc_device_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_alloc_device_params_t>(), - 48usize, - concat!("Size of: ", stringify!(_ze_mem_alloc_device_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_alloc_device_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_mem_alloc_device_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_device_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_device_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_device_params_t>())).pdevice_desc as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_device_params_t), - "::", - stringify!(pdevice_desc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_device_params_t>())).psize as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_device_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_device_params_t>())).palignment as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_device_params_t), - "::", - stringify!(palignment) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_device_params_t>())).phDevice as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_device_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_device_params_t>())).ppptr as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_device_params_t), - "::", - stringify!(ppptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemAllocDevice"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_alloc_device_params_t = _ze_mem_alloc_device_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemAllocDevice"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemAllocDeviceCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_alloc_device_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemAllocHost"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct 
_ze_mem_alloc_host_params_t { - pub phContext: *mut ze_context_handle_t, - pub phost_desc: *mut *const ze_host_mem_alloc_desc_t, - pub psize: *mut usize, - pub palignment: *mut usize, - pub ppptr: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_mem_alloc_host_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_alloc_host_params_t>(), - 40usize, - concat!("Size of: ", stringify!(_ze_mem_alloc_host_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_alloc_host_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_mem_alloc_host_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_host_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_host_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_host_params_t>())).phost_desc as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_host_params_t), - "::", - stringify!(phost_desc) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_host_params_t>())).psize as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_host_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_host_params_t>())).palignment as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_host_params_t), - "::", - stringify!(palignment) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_alloc_host_params_t>())).ppptr as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_alloc_host_params_t), - "::", - stringify!(ppptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemAllocHost"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_alloc_host_params_t = _ze_mem_alloc_host_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemAllocHost"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemAllocHostCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_alloc_host_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemFree"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_free_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_mem_free_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_free_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_mem_free_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_free_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_mem_free_params_t)) - ); - assert_eq!( - unsafe { 
&(*(::std::ptr::null::<_ze_mem_free_params_t>())).phContext as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_free_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_mem_free_params_t>())).pptr as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_free_params_t), - "::", - stringify!(pptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemFree"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_free_params_t = _ze_mem_free_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemFree"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemFreeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_free_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemGetAllocProperties"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_get_alloc_properties_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub ppMemAllocProperties: *mut *mut ze_memory_allocation_properties_t, - pub pphDevice: *mut *mut ze_device_handle_t, -} -#[test] -fn bindgen_test_layout__ze_mem_get_alloc_properties_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_get_alloc_properties_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_mem_get_alloc_properties_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_get_alloc_properties_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_mem_get_alloc_properties_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_alloc_properties_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_alloc_properties_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_alloc_properties_params_t>())).pptr as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_alloc_properties_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_alloc_properties_params_t>())).ppMemAllocProperties - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_alloc_properties_params_t), - "::", - stringify!(ppMemAllocProperties) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_alloc_properties_params_t>())).pphDevice as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_alloc_properties_params_t), - "::", - stringify!(pphDevice) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemGetAllocProperties"] -#[doc = " @details Each entry is a pointer to the 
parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_get_alloc_properties_params_t = _ze_mem_get_alloc_properties_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemGetAllocProperties"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemGetAllocPropertiesCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_get_alloc_properties_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemGetAddressRange"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_get_address_range_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub ppBase: *mut *mut *mut ::std::os::raw::c_void, - pub ppSize: *mut *mut usize, -} -#[test] -fn bindgen_test_layout__ze_mem_get_address_range_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_get_address_range_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_mem_get_address_range_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_get_address_range_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_mem_get_address_range_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_address_range_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_address_range_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_address_range_params_t>())).pptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_address_range_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_address_range_params_t>())).ppBase as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_address_range_params_t), - "::", - stringify!(ppBase) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_address_range_params_t>())).ppSize as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_address_range_params_t), - "::", - stringify!(ppSize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemGetAddressRange"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_get_address_range_params_t = _ze_mem_get_address_range_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemGetAddressRange"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemGetAddressRangeCb_t = ::std::option::Option< - unsafe 
extern "C" fn( - params: *mut ze_mem_get_address_range_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemGetIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_get_ipc_handle_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub ppIpcHandle: *mut *mut ze_ipc_mem_handle_t, -} -#[test] -fn bindgen_test_layout__ze_mem_get_ipc_handle_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_get_ipc_handle_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_mem_get_ipc_handle_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_get_ipc_handle_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_mem_get_ipc_handle_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_ipc_handle_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_ipc_handle_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_ipc_handle_params_t>())).pptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_ipc_handle_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_get_ipc_handle_params_t>())).ppIpcHandle as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_get_ipc_handle_params_t), - "::", - stringify!(ppIpcHandle) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemGetIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_get_ipc_handle_params_t = _ze_mem_get_ipc_handle_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemGetIpcHandle"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemGetIpcHandleCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_get_ipc_handle_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemOpenIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_open_ipc_handle_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub phandle: *mut ze_ipc_mem_handle_t, - pub pflags: *mut ze_ipc_memory_flags_t, - pub ppptr: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_mem_open_ipc_handle_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_open_ipc_handle_params_t>(), - 40usize, - concat!("Size of: ", 
stringify!(_ze_mem_open_ipc_handle_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_open_ipc_handle_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_mem_open_ipc_handle_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_open_ipc_handle_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_open_ipc_handle_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_open_ipc_handle_params_t>())).phDevice as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_open_ipc_handle_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_open_ipc_handle_params_t>())).phandle as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_open_ipc_handle_params_t), - "::", - stringify!(phandle) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_open_ipc_handle_params_t>())).pflags as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_open_ipc_handle_params_t), - "::", - stringify!(pflags) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_open_ipc_handle_params_t>())).ppptr as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_open_ipc_handle_params_t), - "::", - stringify!(ppptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemOpenIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_open_ipc_handle_params_t = _ze_mem_open_ipc_handle_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemOpenIpcHandle"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemOpenIpcHandleCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_open_ipc_handle_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemCloseIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_mem_close_ipc_handle_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_mem_close_ipc_handle_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_close_ipc_handle_params_t>(), - 16usize, - concat!("Size of: ", stringify!(_ze_mem_close_ipc_handle_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_close_ipc_handle_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_mem_close_ipc_handle_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_close_ipc_handle_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_close_ipc_handle_params_t), - "::", - stringify!(phContext) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_close_ipc_handle_params_t>())).pptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_close_ipc_handle_params_t), - "::", - stringify!(pptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeMemCloseIpcHandle"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_mem_close_ipc_handle_params_t = _ze_mem_close_ipc_handle_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeMemCloseIpcHandle"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnMemCloseIpcHandleCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_mem_close_ipc_handle_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of Mem callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_mem_callbacks_t { - pub pfnAllocSharedCb: ze_pfnMemAllocSharedCb_t, - pub pfnAllocDeviceCb: ze_pfnMemAllocDeviceCb_t, - pub pfnAllocHostCb: ze_pfnMemAllocHostCb_t, - pub pfnFreeCb: ze_pfnMemFreeCb_t, - pub pfnGetAllocPropertiesCb: ze_pfnMemGetAllocPropertiesCb_t, - pub pfnGetAddressRangeCb: ze_pfnMemGetAddressRangeCb_t, - pub pfnGetIpcHandleCb: ze_pfnMemGetIpcHandleCb_t, - pub pfnOpenIpcHandleCb: ze_pfnMemOpenIpcHandleCb_t, - pub pfnCloseIpcHandleCb: ze_pfnMemCloseIpcHandleCb_t, -} -#[test] -fn bindgen_test_layout__ze_mem_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_mem_callbacks_t>(), - 72usize, - concat!("Size of: ", stringify!(_ze_mem_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_mem_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_mem_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnAllocSharedCb as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnAllocSharedCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnAllocDeviceCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnAllocDeviceCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnAllocHostCb as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnAllocHostCb) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnFreeCb as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnFreeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnGetAllocPropertiesCb as *const _ - as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnGetAllocPropertiesCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnGetAddressRangeCb as *const _ - as usize - }, - 40usize, - concat!( - "Offset of field: ", - 
stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnGetAddressRangeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnGetIpcHandleCb as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnGetIpcHandleCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnOpenIpcHandleCb as *const _ as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnOpenIpcHandleCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_mem_callbacks_t>())).pfnCloseIpcHandleCb as *const _ as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_ze_mem_callbacks_t), - "::", - stringify!(pfnCloseIpcHandleCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of Mem callback functions pointers"] -pub type ze_mem_callbacks_t = _ze_mem_callbacks_t; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemReserve"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_reserve_params_t { - pub phContext: *mut ze_context_handle_t, - pub ppStart: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub ppptr: *mut *mut *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_reserve_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_reserve_params_t>(), - 32usize, - concat!("Size of: ", stringify!(_ze_virtual_mem_reserve_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_reserve_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_virtual_mem_reserve_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_reserve_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_reserve_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_reserve_params_t>())).ppStart as *const _ - as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_reserve_params_t), - "::", - stringify!(ppStart) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_reserve_params_t>())).psize as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_reserve_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_reserve_params_t>())).ppptr as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_reserve_params_t), - "::", - stringify!(ppptr) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemReserve"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_virtual_mem_reserve_params_t = _ze_virtual_mem_reserve_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemReserve"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, 
Per-Instance user data"] -pub type ze_pfnVirtualMemReserveCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_reserve_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemFree"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_free_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_free_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_free_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_virtual_mem_free_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_free_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_virtual_mem_free_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_free_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_free_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_free_params_t>())).pptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_free_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_free_params_t>())).psize as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_free_params_t), - "::", - stringify!(psize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemFree"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_virtual_mem_free_params_t = _ze_virtual_mem_free_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemFree"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnVirtualMemFreeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_free_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemQueryPageSize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_query_page_size_params_t { - pub phContext: *mut ze_context_handle_t, - pub phDevice: *mut ze_device_handle_t, - pub psize: *mut usize, - pub ppagesize: *mut *mut usize, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_query_page_size_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_query_page_size_params_t>(), - 32usize, - concat!( - "Size of: ", - 
stringify!(_ze_virtual_mem_query_page_size_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_query_page_size_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_virtual_mem_query_page_size_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_query_page_size_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_query_page_size_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_query_page_size_params_t>())).phDevice - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_query_page_size_params_t), - "::", - stringify!(phDevice) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_query_page_size_params_t>())).psize as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_query_page_size_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_query_page_size_params_t>())).ppagesize - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_query_page_size_params_t), - "::", - stringify!(ppagesize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemQueryPageSize"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_virtual_mem_query_page_size_params_t = _ze_virtual_mem_query_page_size_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemQueryPageSize"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnVirtualMemQueryPageSizeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_query_page_size_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemMap"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_map_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub phPhysicalMemory: *mut ze_physical_mem_handle_t, - pub poffset: *mut usize, - pub paccess: *mut ze_memory_access_attribute_t, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_map_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_map_params_t>(), - 48usize, - concat!("Size of: ", stringify!(_ze_virtual_mem_map_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_map_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_virtual_mem_map_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_map_params_t>())).phContext as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_map_params_t), - "::", - stringify!(phContext) - ) - ); - 
assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_map_params_t>())).pptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_map_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_map_params_t>())).psize as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_map_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_map_params_t>())).phPhysicalMemory as *const _ - as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_map_params_t), - "::", - stringify!(phPhysicalMemory) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_map_params_t>())).poffset as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_map_params_t), - "::", - stringify!(poffset) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_map_params_t>())).paccess as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_map_params_t), - "::", - stringify!(paccess) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemMap"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_virtual_mem_map_params_t = _ze_virtual_mem_map_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemMap"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnVirtualMemMapCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_map_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemUnmap"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_unmap_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_unmap_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_unmap_params_t>(), - 24usize, - concat!("Size of: ", stringify!(_ze_virtual_mem_unmap_params_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_unmap_params_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_virtual_mem_unmap_params_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_unmap_params_t>())).phContext as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_unmap_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_unmap_params_t>())).pptr as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_unmap_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - 
&(*(::std::ptr::null::<_ze_virtual_mem_unmap_params_t>())).psize as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_unmap_params_t), - "::", - stringify!(psize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemUnmap"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_virtual_mem_unmap_params_t = _ze_virtual_mem_unmap_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemUnmap"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnVirtualMemUnmapCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_unmap_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemSetAccessAttribute"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_set_access_attribute_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub paccess: *mut ze_memory_access_attribute_t, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_set_access_attribute_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_set_access_attribute_params_t>(), - 32usize, - concat!( - "Size of: ", - stringify!(_ze_virtual_mem_set_access_attribute_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_set_access_attribute_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_virtual_mem_set_access_attribute_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_set_access_attribute_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_set_access_attribute_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_set_access_attribute_params_t>())).pptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_set_access_attribute_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_set_access_attribute_params_t>())).psize - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_set_access_attribute_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_set_access_attribute_params_t>())).paccess - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_set_access_attribute_params_t), - "::", - stringify!(paccess) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemSetAccessAttribute"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] 
-pub type ze_virtual_mem_set_access_attribute_params_t = - _ze_virtual_mem_set_access_attribute_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemSetAccessAttribute"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnVirtualMemSetAccessAttributeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_set_access_attribute_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemGetAccessAttribute"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _ze_virtual_mem_get_access_attribute_params_t { - pub phContext: *mut ze_context_handle_t, - pub pptr: *mut *const ::std::os::raw::c_void, - pub psize: *mut usize, - pub paccess: *mut *mut ze_memory_access_attribute_t, - pub poutSize: *mut *mut usize, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_get_access_attribute_params_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_get_access_attribute_params_t>(), - 40usize, - concat!( - "Size of: ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t) - ) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_get_access_attribute_params_t>(), - 8usize, - concat!( - "Alignment of ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_get_access_attribute_params_t>())).phContext - as *const _ as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t), - "::", - stringify!(phContext) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_get_access_attribute_params_t>())).pptr - as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t), - "::", - stringify!(pptr) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_get_access_attribute_params_t>())).psize - as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t), - "::", - stringify!(psize) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_get_access_attribute_params_t>())).paccess - as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t), - "::", - stringify!(paccess) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_get_access_attribute_params_t>())).poutSize - as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_get_access_attribute_params_t), - "::", - stringify!(poutSize) - ) - ); -} -#[doc = ""] -#[doc = " @brief Callback function parameters for zeVirtualMemGetAccessAttribute"] -#[doc = " @details Each entry is a pointer to the parameter passed to the function;"] -#[doc = " allowing the callback the ability to modify the parameter's value"] -pub type ze_virtual_mem_get_access_attribute_params_t = - 
_ze_virtual_mem_get_access_attribute_params_t; -#[doc = ""] -#[doc = " @brief Callback function-pointer for zeVirtualMemGetAccessAttribute"] -#[doc = " @param[in] params Parameters passed to this instance"] -#[doc = " @param[in] result Return value"] -#[doc = " @param[in] pTracerUserData Per-Tracer user data"] -#[doc = " @param[in,out] ppTracerInstanceUserData Per-Tracer, Per-Instance user data"] -pub type ze_pfnVirtualMemGetAccessAttributeCb_t = ::std::option::Option< - unsafe extern "C" fn( - params: *mut ze_virtual_mem_get_access_attribute_params_t, - result: ze_result_t, - pTracerUserData: *mut ::std::os::raw::c_void, - ppTracerInstanceUserData: *mut *mut ::std::os::raw::c_void, - ), ->; -#[doc = ""] -#[doc = " @brief Table of VirtualMem callback functions pointers"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_virtual_mem_callbacks_t { - pub pfnReserveCb: ze_pfnVirtualMemReserveCb_t, - pub pfnFreeCb: ze_pfnVirtualMemFreeCb_t, - pub pfnQueryPageSizeCb: ze_pfnVirtualMemQueryPageSizeCb_t, - pub pfnMapCb: ze_pfnVirtualMemMapCb_t, - pub pfnUnmapCb: ze_pfnVirtualMemUnmapCb_t, - pub pfnSetAccessAttributeCb: ze_pfnVirtualMemSetAccessAttributeCb_t, - pub pfnGetAccessAttributeCb: ze_pfnVirtualMemGetAccessAttributeCb_t, -} -#[test] -fn bindgen_test_layout__ze_virtual_mem_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_virtual_mem_callbacks_t>(), - 56usize, - concat!("Size of: ", stringify!(_ze_virtual_mem_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_virtual_mem_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_virtual_mem_callbacks_t)) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnReserveCb as *const _ - as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnReserveCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnFreeCb as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnFreeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnQueryPageSizeCb as *const _ - as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnQueryPageSizeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnMapCb as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnMapCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnUnmapCb as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnUnmapCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnSetAccessAttributeCb - as *const _ as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnSetAccessAttributeCb) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::<_ze_virtual_mem_callbacks_t>())).pfnGetAccessAttributeCb - as *const _ as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_virtual_mem_callbacks_t), - "::", - stringify!(pfnGetAccessAttributeCb) - ) - ); -} -#[doc = ""] -#[doc = " @brief Table of VirtualMem callback functions pointers"] -pub type 
ze_virtual_mem_callbacks_t = _ze_virtual_mem_callbacks_t; -#[doc = ""] -#[doc = " @brief Container for all callbacks"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct _ze_callbacks_t { - pub Global: ze_global_callbacks_t, - pub Driver: ze_driver_callbacks_t, - pub Device: ze_device_callbacks_t, - pub Context: ze_context_callbacks_t, - pub CommandQueue: ze_command_queue_callbacks_t, - pub CommandList: ze_command_list_callbacks_t, - pub Fence: ze_fence_callbacks_t, - pub EventPool: ze_event_pool_callbacks_t, - pub Event: ze_event_callbacks_t, - pub Image: ze_image_callbacks_t, - pub Module: ze_module_callbacks_t, - pub ModuleBuildLog: ze_module_build_log_callbacks_t, - pub Kernel: ze_kernel_callbacks_t, - pub Sampler: ze_sampler_callbacks_t, - pub PhysicalMem: ze_physical_mem_callbacks_t, - pub Mem: ze_mem_callbacks_t, - pub VirtualMem: ze_virtual_mem_callbacks_t, -} -#[test] -fn bindgen_test_layout__ze_callbacks_t() { - assert_eq!( - ::std::mem::size_of::<_ze_callbacks_t>(), - 960usize, - concat!("Size of: ", stringify!(_ze_callbacks_t)) - ); - assert_eq!( - ::std::mem::align_of::<_ze_callbacks_t>(), - 8usize, - concat!("Alignment of ", stringify!(_ze_callbacks_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Global as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Global) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Driver as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Driver) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Device as *const _ as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Device) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Context as *const _ as usize }, - 160usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Context) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).CommandQueue as *const _ as usize }, - 224usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(CommandQueue) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).CommandList as *const _ as usize }, - 256usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(CommandList) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Fence as *const _ as usize }, - 464usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Fence) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).EventPool as *const _ as usize }, - 504usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(EventPool) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Event as *const _ as usize }, - 544usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Event) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Image as *const _ as usize }, - 600usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Image) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Module as *const _ as usize }, - 624usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - 
"::", - stringify!(Module) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).ModuleBuildLog as *const _ as usize }, - 688usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(ModuleBuildLog) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Kernel as *const _ as usize }, - 704usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Kernel) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Sampler as *const _ as usize }, - 800usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Sampler) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).PhysicalMem as *const _ as usize }, - 816usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(PhysicalMem) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).Mem as *const _ as usize }, - 832usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(Mem) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::<_ze_callbacks_t>())).VirtualMem as *const _ as usize }, - 904usize, - concat!( - "Offset of field: ", - stringify!(_ze_callbacks_t), - "::", - stringify!(VirtualMem) - ) - ); -} -#[doc = ""] -#[doc = " @brief Container for all callbacks"] -pub type ze_callbacks_t = _ze_callbacks_t; diff --git a/level_zero/Cargo.toml b/level_zero/Cargo.toml deleted file mode 100644 index 851159de..00000000 --- a/level_zero/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "level_zero" -version = "0.1.0" -authors = ["Andrzej Janik "] -edition = "2018" - -[lib] - -[dependencies] -level_zero-sys = { path = "../level_zero-sys" } - -[dependencies.ocl-core] -version = "0.11" -features = ["opencl_version_1_2", "opencl_version_2_0", "opencl_version_2_1"] \ No newline at end of file diff --git a/level_zero/README b/level_zero/README deleted file mode 100644 index b9785bf9..00000000 --- a/level_zero/README +++ /dev/null @@ -1 +0,0 @@ -More ergonomic bindings for oneAPI Level Zero diff --git a/level_zero/src/lib.rs b/level_zero/src/lib.rs deleted file mode 100644 index bdc25a8c..00000000 --- a/level_zero/src/lib.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub use level_zero_sys as sys; - -pub mod ze; -pub use ze::*; \ No newline at end of file diff --git a/level_zero/src/ze.rs b/level_zero/src/ze.rs deleted file mode 100644 index ef44c9d2..00000000 --- a/level_zero/src/ze.rs +++ /dev/null @@ -1,1289 +0,0 @@ -use sys::zeFenceDestroy; - -use crate::sys; -use std::{ - ffi::{c_void, CStr, CString}, - fmt::Debug, - marker::PhantomData, - mem, - ptr::{self, NonNull}, -}; - -/* - This module is not a user-friendly, safe binding. The problem is tracking - object lifetimes. E.g. kernel object cannot outlive module object. - While Rust is relatively good at it, it's tricky to translate it to a safe - API in a way that we can mix and match them, but here's I'd sketch it: - - There's no &mut references: all API operations copy data in and out - - All baseline objects are Send, but not Sync - - There are some problems with using "naked" Rc and Arc: - - We should not allow users to create Rc by themselves without including - parent pointer - - We should not allow DerefMut in Mutex and moving out of it - - Objects are wrapped in Rc> and Arc>, parent - pointer is part of ZeCell/ZeMutex: - - Then e.g. 
zeKernelCreate is mapped three times: - - unsafe Module(&self) -> Kernel - - Module(&Rc>) -> Rc> - - Module(&Arc>) -> Arc - - You create ZeCell by moving Module and Rc - - Pro: Rc and Arc are allowed to be self receivers - - Open question: should some operations take the parent mutex? If so, should - it be done recursively? -*/ - -macro_rules! check { - ($expr:expr) => { - #[allow(unused_unsafe)] - { - let err = unsafe { $expr }; - if err != crate::sys::ze_result_t::ZE_RESULT_SUCCESS { - return Result::Err(err); - } - } - }; -} - -macro_rules! check_panic { - ($expr:expr) => { - let err = unsafe { $expr }; - if err != crate::sys::ze_result_t::ZE_RESULT_SUCCESS { - panic!(err); - } - }; -} - -pub type Result = std::result::Result; - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Error(pub sys::ze_result_t); - -pub fn init() -> Result<()> { - match unsafe { sys::zeInit(sys::ze_init_flags_t::ZE_INIT_FLAG_GPU_ONLY) } { - sys::ze_result_t::ZE_RESULT_SUCCESS => Ok(()), - e => Err(e), - } -} - -// Mutability: no (list of allocations is under a mutex) -// Lifetime: 'static -#[repr(transparent)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct Driver(NonNull); - -unsafe impl Send for Driver {} -unsafe impl Sync for Driver {} - -impl Driver { - pub unsafe fn as_ffi(self) -> sys::ze_driver_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_driver_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x)) - } - - pub fn get() -> Result> { - let mut len = 0; - let mut temp = ptr::null_mut(); - check!(sys::zeDriverGet(&mut len, &mut temp)); - let mut result = Vec::with_capacity(len as usize); - check!(sys::zeDriverGet(&mut len, result.as_mut_ptr() as *mut _)); - unsafe { - result.set_len(len as usize); - } - Ok(result) - } - - pub fn devices(self) -> Result> { - let mut len = 0; - let mut temp = ptr::null_mut(); - check!(sys::zeDeviceGet(self.as_ffi(), &mut len, &mut temp)); - let mut result = Vec::with_capacity(len as usize); - check!(sys::zeDeviceGet( - self.as_ffi(), - &mut len, - result.as_mut_ptr() as *mut _ - )); - unsafe { - result.set_len(len as usize); - } - Ok(result) - } - - pub fn get_properties(self, props: &mut sys::ze_driver_properties_t) -> Result<()> { - check!(sys::zeDriverGetProperties(self.as_ffi(), props)); - Ok(()) - } -} - -// Mutability: no (list of peer allocations under a mutex) -// Lifetime: 'static -#[repr(transparent)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct Device(NonNull); - -unsafe impl Send for Device {} -unsafe impl Sync for Device {} - -impl Device { - pub unsafe fn as_ffi(self) -> sys::ze_device_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_device_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x)) - } - - pub fn get_properties(self, props: &mut sys::ze_device_properties_t) -> Result<()> { - check! { sys::zeDeviceGetProperties(self.as_ffi(), props) }; - Ok(()) - } - - pub fn get_image_properties(self, props: &mut sys::ze_device_image_properties_t) -> Result<()> { - check! { sys::zeDeviceGetImageProperties(self.as_ffi(), props) }; - Ok(()) - } - - pub fn get_memory_properties(self) -> Result> { - let mut count = 0u32; - check! { sys::zeDeviceGetMemoryProperties(self.as_ffi(), &mut count, ptr::null_mut()) }; - if count == 0 { - return Ok(Vec::new()); - } - let mut props = - vec![unsafe { mem::zeroed::() }; count as usize]; - check! 
{ sys::zeDeviceGetMemoryProperties(self.as_ffi(), &mut count, props.as_mut_ptr()) }; - Ok(props) - } - - pub fn get_compute_properties( - self, - props: &mut sys::ze_device_compute_properties_t, - ) -> Result<()> { - check! { sys::zeDeviceGetComputeProperties(self.as_ffi(), props) }; - Ok(()) - } -} - -// Mutability: no -#[repr(transparent)] -pub struct Context(NonNull); - -unsafe impl Send for Context {} -unsafe impl Sync for Context {} - -impl Context { - pub unsafe fn as_ffi(&self) -> sys::ze_context_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_context_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x)) - } - - pub fn new(drv: Driver, devices: Option<&[Device]>) -> Result { - let ctx_desc = sys::ze_context_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_CONTEXT_DESC, - pNext: ptr::null(), - flags: sys::ze_context_flags_t(0), - }; - let mut result = ptr::null_mut(); - let (dev_ptr, dev_len) = match devices { - None => (ptr::null(), 0), - Some(devs) => (devs.as_ptr(), devs.len()), - }; - check!(sys::zeContextCreateEx( - drv.as_ffi(), - &ctx_desc, - dev_len as u32, - dev_ptr as *mut _, - &mut result - )); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub fn mem_alloc_device( - &self, - size: usize, - alignment: usize, - device: Device, - ) -> Result<*mut c_void> { - let descr = sys::ze_device_mem_alloc_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC, - pNext: ptr::null(), - flags: sys::ze_device_mem_alloc_flags_t(0), - ordinal: 0, - }; - let mut result = ptr::null_mut(); - check! { - sys::zeMemAllocDevice( - self.as_ffi(), - &descr, - size, - alignment, - device.as_ffi(), - &mut result, - ) - }; - Ok(result) - } - - // This operation is safe because Level Zero impl tracks allocations - pub fn mem_free(&self, ptr: *mut c_void) -> Result<()> { - check! { - sys::zeMemFree( - self.as_ffi(), - ptr, - ) - }; - Ok(()) - } -} - -impl Drop for Context { - #[allow(unused_must_use)] - fn drop(&mut self) { - check_panic! { sys::zeContextDestroy(self.as_ffi()) }; - } -} - -// Mutability: yes (residency container and others) -// Lifetime parent: Context -#[repr(transparent)] -pub struct CommandQueue<'a>( - NonNull, - PhantomData<&'a ()>, -); - -unsafe impl<'a> Send for CommandQueue<'a> {} - -impl<'a> CommandQueue<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_command_queue_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_command_queue_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - pub fn new(ctx: &'a Context, d: Device) -> Result { - let que_desc = sys::ze_command_queue_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_COMMAND_QUEUE_DESC, - pNext: ptr::null(), - ordinal: 0, - index: 0, - flags: sys::ze_command_queue_flags_t(0), - mode: sys::ze_command_queue_mode_t::ZE_COMMAND_QUEUE_MODE_DEFAULT, - priority: sys::ze_command_queue_priority_t::ZE_COMMAND_QUEUE_PRIORITY_NORMAL, - }; - let mut result = ptr::null_mut(); - check!(sys::zeCommandQueueCreate( - ctx.as_ffi(), - d.as_ffi(), - &que_desc, - &mut result - )); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub fn execute_and_synchronize<'cmd_list>( - &'a self, - cmd: CommandList<'cmd_list>, - ) -> Result> - where - 'a: 'cmd_list, - { - let fence_guard = FenceGuard::new(self, cmd)?; - unsafe { self.execute(&fence_guard.1, Some(&fence_guard.0))? 
}; - Ok(fence_guard) - } - - pub unsafe fn execute<'cmd_list, 'fence>( - &self, - cmd: &CommandList<'cmd_list>, - fence: Option<&Fence<'fence>>, - ) -> Result<()> - where - 'cmd_list: 'fence, - 'a: 'cmd_list, - { - let fence_ptr = fence.map_or(ptr::null_mut(), |f| f.as_ffi()); - check!(sys::zeCommandQueueExecuteCommandLists( - self.as_ffi(), - 1, - &mut cmd.as_ffi(), - fence_ptr - )); - Ok(()) - } - - pub fn synchronize(&self, timeout_ns: u64) -> Result<()> { - check!(sys::zeCommandQueueSynchronize(self.as_ffi(), timeout_ns)); - Ok(()) - } -} - -impl<'a> Drop for CommandQueue<'a> { - #[allow(unused_must_use)] - fn drop(&mut self) { - check_panic! { sys::zeCommandQueueDestroy(self.as_ffi()) }; - } -} - -pub struct FenceGuard<'a>(Fence<'a>, CommandList<'a>); - -impl<'a> FenceGuard<'a> { - fn new(q: &'a CommandQueue, cmd_list: CommandList<'a>) -> Result { - Ok(FenceGuard(Fence::new(q)?, cmd_list)) - } -} - -impl<'a> Drop for FenceGuard<'a> { - #[allow(unused_must_use)] - fn drop(&mut self) { - if let Err(e) = self.0.host_synchronize() { - panic!(e) - } - } -} - -// Mutability: yes (reset) -// Lifetime parent: queue -#[repr(transparent)] -pub struct Fence<'a>(NonNull, PhantomData<&'a ()>); - -unsafe impl<'a> Send for Fence<'a> {} - -impl<'a> Fence<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_fence_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_fence_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - pub fn new(queue: &'a CommandQueue) -> Result { - let desc = sys::_ze_fence_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_FENCE_DESC, - pNext: ptr::null(), - flags: sys::ze_fence_flags_t(0), - }; - let mut result = ptr::null_mut(); - check!(sys::zeFenceCreate(queue.as_ffi(), &desc, &mut result)); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub fn host_synchronize(&self) -> Result<()> { - check!(sys::zeFenceHostSynchronize(self.as_ffi(), u64::max_value())); - Ok(()) - } -} - -impl<'a> Drop for Fence<'a> { - fn drop(&mut self) { - check_panic! { zeFenceDestroy(self.as_ffi()) }; - } -} - -// Mutability: yes (building, linking) -// Lifetime parent: Context -#[repr(transparent)] -pub struct Module<'a>(NonNull, PhantomData<&'a ()>); - -unsafe impl<'a> Send for Module<'a> {} - -impl<'a> Module<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_module_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_module_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - // HACK ALERT - // We use OpenCL for now to do SPIR-V linking, because Level0 - // does not allow linking. Don't let presence of zeModuleDynamicLink fool - // you, it's not currently possible to create non-compiled modules. - // zeModuleCreate always compiles (builds and links). 
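A sketch, outside the patch itself, of how the OpenCL-backed linking path described in the comment above was driven by callers; the function below is illustrative only and assumes a live Context, Device and SPIR-V buffers. The pair return type lets a caller print the build log even when linking fails:

fn link_and_report<'a>(ctx: &'a Context, dev: Device, parts: &[&[u8]]) -> Result<Module<'a>> {
    // build_link_spirv compiles and links through OpenCL (see the comment above),
    // then rebuilds the resulting native binary as a Level Zero module.
    let (module, log) = Module::build_link_spirv(ctx, dev, parts, None);
    if let Some(log) = log {
        if let Ok(text) = log.to_cstring() {
            eprintln!("module build log: {}", text.to_string_lossy());
        }
    }
    module
}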
- pub fn build_link_spirv<'buffers>( - ctx: &'a Context, - d: Device, - binaries: &[&'buffers [u8]], - opts: Option<&CStr>, - ) -> (Result, Option) { - let ocl_program = match Self::build_link_spirv_impl(binaries, opts) { - Err(_) => { - return ( - Err(sys::ze_result_t::ZE_RESULT_ERROR_MODULE_LINK_FAILURE), - None, - ) - } - Ok(prog) => prog, - }; - match ocl_core::get_program_info(&ocl_program, ocl_core::ProgramInfo::Binaries) { - Ok(ocl_core::ProgramInfoResult::Binaries(binaries)) => { - let (module, build_log) = Self::build_native_logged(ctx, d, &binaries[0]); - (module, Some(build_log)) - } - _ => return (Err(sys::ze_result_t::ZE_RESULT_ERROR_UNKNOWN), None), - } - } - - fn build_link_spirv_impl<'buffers>( - binaries: &[&'buffers [u8]], - opts: Option<&CStr>, - ) -> ocl_core::Result { - let platforms = ocl_core::get_platform_ids()?; - let (platform, device) = platforms - .iter() - .find_map(|plat| { - let devices = - ocl_core::get_device_ids(plat, Some(ocl_core::DeviceType::GPU), None).ok()?; - for dev in devices { - let vendor = - ocl_core::get_device_info(dev, ocl_core::DeviceInfo::VendorId).ok()?; - if let ocl_core::DeviceInfoResult::VendorId(0x8086) = vendor { - let dev_type = - ocl_core::get_device_info(dev, ocl_core::DeviceInfo::Type).ok()?; - if let ocl_core::DeviceInfoResult::Type(ocl_core::DeviceType::GPU) = - dev_type - { - return Some((plat.clone(), dev)); - } - } - } - None - }) - .ok_or("")?; - let ctx_props = ocl_core::ContextProperties::new().platform(platform); - let ocl_ctx = ocl_core::create_context_from_type::( - Some(&ctx_props), - ocl_core::DeviceType::GPU, - None, - None, - )?; - let mut programs = Vec::with_capacity(binaries.len()); - for binary in binaries { - programs.push(ocl_core::create_program_with_il(&ocl_ctx, binary, None)?); - } - let options = match opts { - Some(o) => o.to_owned(), - None => CString::default(), - }; - for program in programs.iter() { - ocl_core::compile_program( - program, - Some(&[device]), - &options, - &[], - &[], - None, - None, - None, - )?; - } - ocl_core::link_program::( - &ocl_ctx, - Some(&[device]), - &options, - &programs.iter().collect::>(), - None, - None, - None, - ) - } - - pub fn build_spirv( - ctx: &'a Context, - d: Device, - bin: &[u8], - opts: Option<&CStr>, - ) -> Result { - Module::new(ctx, true, d, bin, opts) - } - - pub fn build_spirv_logged( - ctx: &'a Context, - d: Device, - bin: &[u8], - opts: Option<&CStr>, - ) -> (Result, BuildLog) { - Module::new_logged(ctx, true, d, bin, opts) - } - - pub fn build_native_logged( - ctx: &'a Context, - d: Device, - bin: &[u8], - ) -> (Result, BuildLog) { - Module::new_logged(ctx, false, d, bin, None) - } - - fn new( - ctx: &'a Context, - spirv: bool, - d: Device, - bin: &[u8], - opts: Option<&CStr>, - ) -> Result { - let desc = sys::ze_module_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_MODULE_DESC, - pNext: ptr::null(), - format: if spirv { - sys::ze_module_format_t::ZE_MODULE_FORMAT_IL_SPIRV - } else { - sys::ze_module_format_t::ZE_MODULE_FORMAT_NATIVE - }, - inputSize: bin.len(), - pInputModule: bin.as_ptr(), - pBuildFlags: opts.map(|s| s.as_ptr() as *const _).unwrap_or(ptr::null()), - pConstants: ptr::null(), - }; - let mut result: sys::ze_module_handle_t = ptr::null_mut(); - check! 
{ - sys::zeModuleCreate( - ctx.as_ffi(), - d.as_ffi(), - &desc, - &mut result, - ptr::null_mut(), - ) - }; - Ok(unsafe { Self::from_ffi(result) }) - } - - fn new_logged( - ctx: &'a Context, - spirv: bool, - d: Device, - bin: &[u8], - opts: Option<&CStr>, - ) -> (Result, BuildLog) { - let desc = sys::ze_module_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_MODULE_DESC, - pNext: ptr::null(), - format: if spirv { - sys::ze_module_format_t::ZE_MODULE_FORMAT_IL_SPIRV - } else { - sys::ze_module_format_t::ZE_MODULE_FORMAT_NATIVE - }, - inputSize: bin.len(), - pInputModule: bin.as_ptr(), - pBuildFlags: opts.map(|s| s.as_ptr() as *const _).unwrap_or(ptr::null()), - pConstants: ptr::null(), - }; - let mut result: sys::ze_module_handle_t = ptr::null_mut(); - let mut log_handle = ptr::null_mut(); - let err = unsafe { - sys::zeModuleCreate( - ctx.as_ffi(), - d.as_ffi(), - &desc, - &mut result, - &mut log_handle, - ) - }; - let log = unsafe { BuildLog::from_ffi(log_handle) }; - if err != sys::ze_result_t::ZE_RESULT_SUCCESS { - (Result::Err(err), log) - } else { - (Ok(unsafe { Self::from_ffi(result) }), log) - } - } - - pub fn get_global_pointer(&self, global_name: &CStr) -> Result<(usize, *mut c_void)> { - let slice = global_name.to_bytes_with_nul(); - let mut result_size = 0; - let mut result_ptr = ptr::null_mut(); - check!(sys::zeModuleGetGlobalPointer( - self.as_ffi(), - slice.as_ptr() as *const _, - &mut result_size, - &mut result_ptr, - )); - Ok((result_size, result_ptr)) - } - - pub fn dynamic_link(modules: &[&Module]) -> Result<()> { - unsafe { - Self::with_raw_slice(modules, |num, ptr| { - check!(sys::zeModuleDynamicLink(num, ptr, ptr::null_mut())); - Ok(()) - }) - } - } - - unsafe fn with_raw_slice<'x, T>( - modules: &[&Module<'x>], - f: impl FnOnce(u32, *mut sys::ze_module_handle_t) -> T, - ) -> T { - let (ptr, mod_vec) = match modules { - [] => (ptr::null_mut(), None), - [e] => (&e.0 as *const _ as *mut _, None), - _ => { - let mut ev_vec = modules.iter().map(|e| e.as_ffi()).collect::>(); - (ev_vec.as_mut_ptr(), Some(ev_vec)) - } - }; - let result = f(modules.len() as u32, ptr); - drop(mod_vec); - result - } -} - -impl<'a> Drop for Module<'a> { - #[allow(unused_must_use)] - fn drop(&mut self) { - check_panic! { sys::zeModuleDestroy(self.as_ffi()) }; - } -} - -// Mutability: none -// Lifetime parent: none, but need to destroy -pub struct BuildLog(NonNull); - -unsafe impl Sync for BuildLog {} -unsafe impl Send for BuildLog {} - -impl BuildLog { - pub unsafe fn as_ffi(&self) -> sys::ze_module_build_log_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_module_build_log_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x)) - } - - pub fn to_cstring(&self) -> Result { - let mut size = 0; - check! { sys::zeModuleBuildLogGetString(self.as_ffi(), &mut size, ptr::null_mut()) }; - let mut str_vec = vec![0u8; size]; - check! 
{ sys::zeModuleBuildLogGetString(self.as_ffi(), &mut size, str_vec.as_mut_ptr() as *mut i8) }; - str_vec.push(0); - Ok(unsafe { CString::from_vec_unchecked(str_vec) }) - } -} - -impl Drop for BuildLog { - fn drop(&mut self) { - check_panic!(sys::zeModuleBuildLogDestroy(self.as_ffi())); - } -} - -// Mutability: none -// Lifetime parent: Context -pub struct DeviceBuffer<'a, T: Copy> { - ptr: *mut c_void, - ctx: sys::ze_context_handle_t, - len: usize, - marker: PhantomData<&'a T>, -} - -unsafe impl<'a, T: Copy> Sync for DeviceBuffer<'a, T> {} -unsafe impl<'a, T: Copy> Send for DeviceBuffer<'a, T> {} - -impl<'a, T: Copy> DeviceBuffer<'a, T> { - pub unsafe fn as_ffi(&self) -> (sys::ze_context_handle_t, *mut c_void, usize) { - (self.ctx, self.ptr, self.len) - } - pub unsafe fn from_ffi(ctx: sys::ze_context_handle_t, ptr: *mut c_void, len: usize) -> Self { - let marker = PhantomData::<&'a T>; - Self { - ptr, - ctx, - len, - marker, - } - } - - pub fn new(ctx: &'a Context, dev: Device, len: usize) -> Result { - let desc = sys::_ze_device_mem_alloc_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_DEVICE_MEM_ALLOC_DESC, - pNext: ptr::null(), - flags: sys::ze_device_mem_alloc_flags_t(0), - ordinal: 0, - }; - let mut result = ptr::null_mut(); - check!(sys::zeMemAllocDevice( - ctx.as_ffi(), - &desc, - len * mem::size_of::(), - mem::align_of::(), - dev.as_ffi(), - &mut result - )); - Ok(unsafe { Self::from_ffi(ctx.as_ffi(), result, len) }) - } - - pub fn len(&self) -> usize { - self.len - } - - pub fn data(&self) -> *mut c_void { - self.ptr - } -} - -impl<'a, T: Copy> Drop for DeviceBuffer<'a, T> { - fn drop(&mut self) { - check_panic! { sys::zeMemFree(self.ctx, self.ptr) }; - } -} - -// Mutability: yes (appends) -// Lifetime parent: Context -pub struct CommandList<'a>(NonNull, PhantomData<&'a ()>); - -unsafe impl<'a> Send for CommandList<'a> {} - -impl<'a> CommandList<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_command_list_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_command_list_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - pub fn new(ctx: &'a Context, dev: Device) -> Result { - let desc = sys::ze_command_list_desc_t { - stype: sys::_ze_structure_type_t::ZE_STRUCTURE_TYPE_COMMAND_LIST_DESC, - commandQueueGroupOrdinal: 0, - pNext: ptr::null(), - flags: sys::ze_command_list_flags_t(0), - }; - let mut result: sys::ze_command_list_handle_t = ptr::null_mut(); - check!(sys::zeCommandListCreate( - ctx.as_ffi(), - dev.as_ffi(), - &desc, - &mut result - )); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub fn new_immediate(ctx: &'a Context, dev: Device) -> Result { - let queue_desc = sys::ze_command_queue_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_COMMAND_QUEUE_DESC, - pNext: ptr::null(), - ordinal: 0, - index: 0, - flags: sys::ze_command_queue_flags_t(0), - mode: sys::ze_command_queue_mode_t::ZE_COMMAND_QUEUE_MODE_DEFAULT, - priority: sys::ze_command_queue_priority_t::ZE_COMMAND_QUEUE_PRIORITY_NORMAL, - }; - let mut result: sys::ze_command_list_handle_t = ptr::null_mut(); - check!(sys::zeCommandListCreateImmediate( - ctx.as_ffi(), - dev.as_ffi(), - &queue_desc, - &mut result - )); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub unsafe fn append_memory_copy< - 'dep, - T: 'a + 'dep + Copy + Sized, - Dst: Into>, - Src: Into>, - >( - &self, - dst: Dst, - src: Src, - signal: Option<&Event<'dep>>, - wait: &[&'dep Event<'dep>], - ) -> Result<()> { - let 
dst = dst.into(); - let src = src.into(); - let elements = std::cmp::min(dst.len(), src.len()); - let length = elements * mem::size_of::(); - self.append_memory_copy_raw(dst.as_mut_ptr(), src.as_ptr(), length, signal, wait) - } - - pub unsafe fn append_memory_copy_raw( - &self, - dst: *mut c_void, - src: *const c_void, - length: usize, - signal: Option<&Event>, - wait: &[&Event], - ) -> Result<()> { - let signal_event = signal.map_or(ptr::null_mut(), |e| e.as_ffi()); - Event::with_raw_slice(wait, |wait_len, wait_ptr| { - check!(sys::zeCommandListAppendMemoryCopy( - self.as_ffi(), - dst, - src, - length, - signal_event, - wait_len, - wait_ptr - )); - Ok(()) - }) - } - - pub unsafe fn append_memory_fill<'dep, T: Copy + Sized + 'dep, Dst: Into>>( - &'a self, - dst: Dst, - pattern: &T, - signal: Option<&Event<'dep>>, - wait: &[&'dep Event<'dep>], - ) -> Result<()> { - let dst = dst.into(); - let raw_pattern = pattern as *const _ as *const _; - let signal_event = signal.map_or(ptr::null_mut(), |e| e.as_ffi()); - Event::with_raw_slice(wait, |wait_len, wait_ptr| { - check!(sys::zeCommandListAppendMemoryFill( - self.as_ffi(), - dst.as_mut_ptr(), - raw_pattern, - mem::size_of::(), - dst.len() * mem::size_of::(), - signal_event, - wait_len, - wait_ptr - )); - Ok(()) - }) - } - - pub unsafe fn append_memory_fill_raw( - &self, - dst: *mut c_void, - pattern: *mut c_void, - pattern_size: usize, - size: usize, - signal: Option<&Event>, - wait: &[&Event], - ) -> Result<()> { - let signal_event = signal.map_or(ptr::null_mut(), |e| e.as_ffi()); - Event::with_raw_slice(wait, |wait_len, wait_ptr| { - check!(sys::zeCommandListAppendMemoryFill( - self.as_ffi(), - dst, - pattern, - pattern_size, - size, - signal_event, - wait_len, - wait_ptr - )); - Ok(()) - }) - } - - pub unsafe fn append_launch_kernel( - &self, - kernel: &Kernel, - group_count: &[u32; 3], - signal: Option<&Event>, - wait: &[&Event], - ) -> Result<()> { - let gr_count = sys::ze_group_count_t { - groupCountX: group_count[0], - groupCountY: group_count[1], - groupCountZ: group_count[2], - }; - let signal_event = signal.map_or(ptr::null_mut(), |e| e.as_ffi()); - Event::with_raw_slice(wait, |wait_len, wait_ptr| { - check!(sys::zeCommandListAppendLaunchKernel( - self.as_ffi(), - kernel.as_ffi(), - &gr_count, - signal_event, - wait_len, - wait_ptr, - )); - Ok(()) - }) - } - - pub unsafe fn append_barrier(&self, signal: Option<&Event>, wait: &[&Event]) -> Result<()> { - let signal_event = signal.map_or(ptr::null_mut(), |e| e.as_ffi()); - Event::with_raw_slice(wait, |wait_len, wait_ptr| { - check!(sys::zeCommandListAppendBarrier( - self.as_ffi(), - signal_event, - wait_len, - wait_ptr - )); - Ok(()) - }) - } - - pub fn close(&self) -> Result<()> { - check!(sys::zeCommandListClose(self.as_ffi())); - Ok(()) - } -} - -impl<'a> Drop for CommandList<'a> { - #[allow(unused_must_use)] - fn drop(&mut self) { - check_panic! 
{ sys::zeCommandListDestroy(self.as_ffi()) }; - } -} - -pub struct CommandListBuilder<'a>(CommandList<'a>); - -unsafe impl<'a> Send for CommandListBuilder<'a> {} - -impl<'a> CommandListBuilder<'a> { - pub fn new(ctx: &'a Context, dev: Device) -> Result { - Ok(CommandListBuilder(CommandList::new(ctx, dev)?)) - } - - pub fn append_memory_copy< - 'dep, - 'result, - T: 'dep + Copy + Sized, - Dst: Into>, - Src: Into>, - >( - self, - dst: Dst, - src: Src, - signal: Option<&'dep Event<'dep>>, - wait: &[&'dep Event<'dep>], - ) -> Result> - where - 'a: 'result, - 'dep: 'result, - { - unsafe { self.0.append_memory_copy(dst, src, signal, wait) }?; - Ok(self) - } - - pub fn append_memory_fill<'dep, 'result, T: 'dep + Copy + Sized, Dst: Into>>( - self, - dst: Dst, - pattern: &T, - signal: Option<&Event<'dep>>, - wait: &[&'dep Event<'dep>], - ) -> Result> - where - 'a: 'result, - 'dep: 'result, - { - unsafe { self.0.append_memory_fill(dst, pattern, signal, wait) }?; - Ok(self) - } - - pub fn append_launch_kernel<'dep, 'result>( - self, - kernel: &'dep Kernel, - group_count: &[u32; 3], - signal: Option<&Event<'dep>>, - wait: &[&'dep Event<'dep>], - ) -> Result> - where - 'a: 'result, - 'dep: 'result, - { - unsafe { - self.0 - .append_launch_kernel(kernel, group_count, signal, wait) - }?; - Ok(self) - } - - pub fn execute(self, q: &'a CommandQueue<'a>) -> Result> { - self.0.close()?; - q.execute_and_synchronize(self.0) - } -} - -#[derive(Copy, Clone)] -pub struct Slice<'a, T: Copy + Sized> { - ptr: *mut c_void, - len: usize, - marker: PhantomData<&'a T>, -} - -unsafe impl<'a, T: Copy + Sized> Send for Slice<'a, T> {} -unsafe impl<'a, T: Copy + Sized> Sync for Slice<'a, T> {} - -impl<'a, T: Copy + Sized> Slice<'a, T> { - pub unsafe fn new(ptr: *mut c_void, len: usize) -> Self { - Self { - ptr, - len, - marker: PhantomData, - } - } - - pub fn as_ptr(&self) -> *const c_void { - self.ptr - } - - pub fn as_mut_ptr(&self) -> *mut c_void { - self.ptr - } - - pub fn len(&self) -> usize { - self.len - } -} - -impl<'a, T: Copy + Sized> From<&'a [T]> for Slice<'a, T> { - fn from(s: &'a [T]) -> Self { - Slice { - ptr: s.as_ptr() as *mut _, - len: s.len(), - marker: PhantomData, - } - } -} - -impl<'a, T: Copy + Sized> From<&'a DeviceBuffer<'a, T>> for Slice<'a, T> { - fn from(b: &'a DeviceBuffer<'a, T>) -> Self { - Slice { - ptr: b.ptr, - len: b.len, - marker: PhantomData, - } - } -} - -// Mutability: yes (appends) -// Lifetime parent: Context -pub struct EventPool<'a>(NonNull, PhantomData<&'a ()>); - -impl<'a> EventPool<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_event_pool_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_event_pool_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - pub fn new( - ctx: &'a Context, - flags: sys::ze_event_pool_flags_t, - count: u32, - devs: Option<&[Device]>, - ) -> Result { - let desc = sys::ze_event_pool_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_EVENT_POOL_DESC, - pNext: ptr::null(), - flags: flags, - count: count, - }; - let (dev_len, dev_ptr) = devs.map_or((0, ptr::null_mut()), |devs| { - (devs.len(), devs.as_ptr() as *mut _) - }); - let mut result = ptr::null_mut(); - check!(sys::zeEventPoolCreate( - ctx.as_ffi(), - &desc, - dev_len as u32, - dev_ptr, - &mut result - )); - Ok(unsafe { Self::from_ffi(result) }) - } -} - -impl<'a> Drop for EventPool<'a> { - fn drop(&mut self) { - check_panic! 
{ sys::zeEventPoolDestroy(self.as_ffi()) }; - } -} - -pub struct Event<'a>(NonNull, PhantomData<&'a ()>); - -impl<'a> Event<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_event_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_event_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - pub fn new( - pool: &'a EventPool<'a>, - index: u32, - signal: sys::ze_event_scope_flags_t, - wait: sys::ze_event_scope_flags_t, - ) -> Result { - let desc = sys::ze_event_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_EVENT_DESC, - pNext: ptr::null(), - index: index, - signal, - wait, - }; - let mut result = ptr::null_mut(); - check!(sys::zeEventCreate(pool.as_ffi(), &desc, &mut result)); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub fn host_synchronize(&self, timeout_ns: u64) -> Result<()> { - check! { sys::zeEventHostSynchronize(self.as_ffi(), timeout_ns) }; - Ok(()) - } - - pub fn is_ready(&self) -> Result { - let status = unsafe { sys::zeEventQueryStatus(self.as_ffi()) }; - match status { - sys::ze_result_t::ZE_RESULT_SUCCESS => Ok(true), - sys::ze_result_t::ZE_RESULT_NOT_READY => Ok(false), - err => Err(err), - } - } - - pub fn host_reset(&self) -> Result<()> { - check! { sys::zeEventHostReset(self.as_ffi()) }; - Ok(()) - } - - unsafe fn with_raw_slice<'x, T>( - events: &[&Event<'x>], - f: impl FnOnce(u32, *mut sys::ze_event_handle_t) -> T, - ) -> T { - let (ptr, ev_vec) = match events { - [] => (ptr::null_mut(), None), - [e] => (&e.0 as *const _ as *mut _, None), - _ => { - let mut ev_vec = events.iter().map(|e| e.as_ffi()).collect::>(); - (ev_vec.as_mut_ptr(), Some(ev_vec)) - } - }; - let result = f(events.len() as u32, ptr); - drop(ev_vec); - result - } -} - -impl<'a> Drop for Event<'a> { - fn drop(&mut self) { - check_panic! 
{ sys::zeEventDestroy(self.as_ffi()) }; - } -} - -pub struct Kernel<'a>(NonNull, PhantomData<&'a ()>); - -impl<'a> Kernel<'a> { - pub unsafe fn as_ffi(&self) -> sys::ze_kernel_handle_t { - self.0.as_ptr() - } - pub unsafe fn from_ffi(x: sys::ze_kernel_handle_t) -> Self { - if x == ptr::null_mut() { - panic!("FFI handle can't be zero") - } - Self(NonNull::new_unchecked(x), PhantomData) - } - - pub fn new_resident(module: &'a Module, name: &CStr) -> Result { - let desc = sys::ze_kernel_desc_t { - stype: sys::ze_structure_type_t::ZE_STRUCTURE_TYPE_KERNEL_DESC, - pNext: ptr::null(), - flags: sys::ze_kernel_flags_t::ZE_KERNEL_FLAG_FORCE_RESIDENCY, - pKernelName: name.as_ptr() as *const _, - }; - let mut result = ptr::null_mut(); - check!(sys::zeKernelCreate(module.as_ffi(), &desc, &mut result)); - Ok(unsafe { Self::from_ffi(result) }) - } - - pub fn set_indirect_access(&self, flags: sys::ze_kernel_indirect_access_flags_t) -> Result<()> { - check!(sys::zeKernelSetIndirectAccess(self.as_ffi(), flags)); - Ok(()) - } - - pub fn set_arg_buffer>>( - &self, - index: u32, - buff: Buff, - ) -> Result<()> { - let ptr = buff.into().as_mut_ptr(); - check!(sys::zeKernelSetArgumentValue( - self.as_ffi(), - index, - mem::size_of::<*const ()>(), - &ptr as *const _ as *const _, - )); - Ok(()) - } - - pub fn set_arg_scalar(&self, index: u32, value: &T) -> Result<()> { - check!(sys::zeKernelSetArgumentValue( - self.as_ffi(), - index, - mem::size_of::(), - value as *const T as *const _, - )); - Ok(()) - } - - pub unsafe fn set_arg_raw(&self, index: u32, size: usize, value: *const c_void) -> Result<()> { - check!(sys::zeKernelSetArgumentValue( - self.as_ffi(), - index, - size, - value - )); - Ok(()) - } - - pub fn set_group_size(&self, x: u32, y: u32, z: u32) -> Result<()> { - check!(sys::zeKernelSetGroupSize(self.as_ffi(), x, y, z)); - Ok(()) - } - - pub fn get_properties(&self) -> Result> { - let mut props = Box::new(unsafe { mem::zeroed::() }); - check!(sys::zeKernelGetProperties( - self.as_ffi(), - props.as_mut() as *mut _ - )); - Ok(props) - } -} - -impl<'a> Drop for Kernel<'a> { - #[allow(unused_must_use)] - fn drop(&mut self) { - check_panic! 
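A sketch of how the removed wrappers above fit together for a single kernel launch; `ctx`, `dev`, a built `module` and a DeviceBuffer are assumed to exist, and the kernel name and group sizes are illustrative only:

fn launch_one(ctx: &Context, dev: Device, module: &Module, buf: &DeviceBuffer<u32>) -> Result<()> {
    let name = std::ffi::CString::new("my_kernel").unwrap();
    let kernel = Kernel::new_resident(module, &name)?;
    kernel.set_arg_buffer(0, buf)?;
    kernel.set_group_size(64, 1, 1)?;
    let queue = CommandQueue::new(ctx, dev)?;
    // execute() closes and submits the list and returns a FenceGuard whose Drop
    // blocks until the kernel has finished.
    let _fence = CommandListBuilder::new(ctx, dev)?
        .append_launch_kernel(&kernel, &[1, 1, 1], None, &[])?
        .execute(&queue)?;
    Ok(())
}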
{ sys::zeKernelDestroy(self.as_ffi()) }; - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn event_has_correct_layout() { - assert_eq!( - mem::size_of::(), - mem::size_of::() - ); - } -} diff --git a/llvm_zluda/src/lib.cpp b/llvm_zluda/src/lib.cpp index 072f7731..c8ac2d79 100644 --- a/llvm_zluda/src/lib.cpp +++ b/llvm_zluda/src/lib.cpp @@ -1,7 +1,10 @@ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" #include #include #include #include +#pragma GCC diagnostic pop using namespace llvm; @@ -189,7 +192,8 @@ void LLVMZludaBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering, auto builder = llvm::unwrap(B); LLVMContext &context = builder->getContext(); builder->CreateFence(mapFromLLVMOrdering(Ordering), - context.getOrInsertSyncScopeID(scope)); + context.getOrInsertSyncScopeID(scope), + Name); } LLVM_C_EXTERN_C_END \ No newline at end of file diff --git a/ptx/lib/zluda_ptx_impl.bc b/ptx/lib/zluda_ptx_impl.bc index 6651430d..4b5a5d82 100644 Binary files a/ptx/lib/zluda_ptx_impl.bc and b/ptx/lib/zluda_ptx_impl.bc differ diff --git a/ptx/lib/zluda_ptx_impl.cpp b/ptx/lib/zluda_ptx_impl.cpp index d0ec8534..f86a7fd7 100644 --- a/ptx/lib/zluda_ptx_impl.cpp +++ b/ptx/lib/zluda_ptx_impl.cpp @@ -25,16 +25,16 @@ extern "C" return (uint32_t)__ockl_get_local_size(member); } - size_t __ockl_get_global_id(uint32_t) __device__; + size_t __ockl_get_group_id(uint32_t) __device__; uint32_t FUNC(sreg_ctaid)(uint8_t member) { - return (uint32_t)__ockl_get_global_id(member); + return (uint32_t)__ockl_get_group_id(member); } - size_t __ockl_get_global_size(uint32_t) __device__; + size_t __ockl_get_num_groups(uint32_t) __device__; uint32_t FUNC(sreg_nctaid)(uint8_t member) { - return (uint32_t)__ockl_get_global_size(member); + return (uint32_t)__ockl_get_num_groups(member); } uint32_t __ockl_bfe_u32(uint32_t, uint32_t, uint32_t) __attribute__((device)); diff --git a/ptx_parser/src/lib.rs b/ptx_parser/src/lib.rs index 1ea2d715..f2c376d5 100644 --- a/ptx_parser/src/lib.rs +++ b/ptx_parser/src/lib.rs @@ -284,20 +284,40 @@ fn immediate_value<'a, 'input>(stream: &mut PtxParser<'a, 'input>) -> PResult(text: &'input str) -> Option> { - let input = lex_with_span(text).ok()?; - let mut errors = Vec::new(); - let state = PtxParserState::new(text, &mut errors); - let parser = PtxParser { - state, - input: &input[..], +pub fn parse_for_errors<'input>(text: &'input str) -> Vec { + let (tokens, mut errors) = lex_with_span_unchecked(text); + let parse_result = { + let state = PtxParserState::new(text, &mut errors); + let parser = PtxParser { + state, + input: &tokens[..], + }; + module + .parse(parser) + .map_err(|err| PtxError::Parser(err.into_inner())) }; - let parsing_result = module.parse(parser).ok(); - if !errors.is_empty() { - None - } else { - parsing_result + match parse_result { + Ok(_) => {} + Err(err) => { + errors.push(err); + } } + errors +} + +fn lex_with_span_unchecked<'input>( + text: &'input str, +) -> (Vec<(Token<'input>, logos::Span)>, Vec) { + let lexer = Token::lexer(text); + let mut result = Vec::new(); + let mut errors = Vec::new(); + for (token, span) in lexer.spanned() { + match token { + Ok(t) => result.push((t, span)), + Err(err) => errors.push(PtxError::Lexer { source: err }), + } + } + (result, errors) } pub fn parse_module_checked<'input>( @@ -342,17 +362,6 @@ pub fn parse_module_checked<'input>( } } -fn lex_with_span<'input>( - text: &'input str, -) -> Result, logos::Span)>, TokenError> { - let lexer = Token::lexer(text); - let mut result = 
Vec::new(); - for (token, span) in lexer.spanned() { - result.push((token?, span)); - } - Ok(result) -} - fn module<'a, 'input>(stream: &mut PtxParser<'a, 'input>) -> PResult> { ( version, diff --git a/spirv_tools-sys/Cargo.toml b/spirv_tools-sys/Cargo.toml deleted file mode 100644 index a5cadc17..00000000 --- a/spirv_tools-sys/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "spirv_tools-sys" -version = "0.0.0" -authors = ["Andrzej Janik "] -edition = "2018" - -[lib] - -[build-dependencies] -cmake = "0.1" \ No newline at end of file diff --git a/spirv_tools-sys/README b/spirv_tools-sys/README deleted file mode 100644 index 31521640..00000000 --- a/spirv_tools-sys/README +++ /dev/null @@ -1 +0,0 @@ -bindgen --whitelist-type="spv.*" --whitelist-function="spv.*" --size_t-is-usize --default-enum-style=rust --bitfield-enum="spv_text_to_binary_options_t|spv_binary_to_text_options_t" ../ext/SPIRV-Tools/include/spirv-tools/libspirv.h -o src/spirv_tools.rs \ No newline at end of file diff --git a/spirv_tools-sys/build.rs b/spirv_tools-sys/build.rs deleted file mode 100644 index ae725614..00000000 --- a/spirv_tools-sys/build.rs +++ /dev/null @@ -1,28 +0,0 @@ -extern crate cmake; - -use cmake::Config; -use std::{env::VarError, path::PathBuf}; - -fn main() -> Result<(), VarError> { - let root_path = std::env::var("CARGO_MANIFEST_DIR")?; - let mut headers_path = PathBuf::new(); - headers_path.push(root_path); - headers_path.push("../ext/spirv-headers"); - let spirv_tools_dir = Config::new("../ext/spirv-tools") - .always_configure(false) - .define("SPIRV-Headers_SOURCE_DIR", headers_path) - .define("SPIRV_SKIP_EXECUTABLES", "ON") - .define("SPIRV_SKIP_TESTS", "ON") - .build(); - println!( - "cargo:rustc-link-search=native={}/bin", - spirv_tools_dir.display() - ); - println!( - "cargo:rustc-link-search=native={}/lib", - spirv_tools_dir.display() - ); - // dynamic linking to avoid linking to C++ runtime - println!("cargo:rustc-link-lib=dylib=SPIRV-Tools-shared"); - Ok(()) -} diff --git a/spirv_tools-sys/src/lib.rs b/spirv_tools-sys/src/lib.rs deleted file mode 100644 index c1a9dc2f..00000000 --- a/spirv_tools-sys/src/lib.rs +++ /dev/null @@ -1,3 +0,0 @@ -#[allow(warnings)] -mod spirv_tools; -pub use spirv_tools::*; \ No newline at end of file diff --git a/spirv_tools-sys/src/spirv_tools.rs b/spirv_tools-sys/src/spirv_tools.rs deleted file mode 100644 index fe9640b8..00000000 --- a/spirv_tools-sys/src/spirv_tools.rs +++ /dev/null @@ -1,972 +0,0 @@ -/* automatically generated by rust-bindgen 0.54.1 */ - -pub type __uint16_t = ::std::os::raw::c_ushort; -pub type __uint32_t = ::std::os::raw::c_uint; -#[repr(i32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_result_t { - SPV_SUCCESS = 0, - SPV_UNSUPPORTED = 1, - SPV_END_OF_STREAM = 2, - SPV_WARNING = 3, - SPV_FAILED_MATCH = 4, - SPV_REQUESTED_TERMINATION = 5, - SPV_ERROR_INTERNAL = -1, - SPV_ERROR_OUT_OF_MEMORY = -2, - SPV_ERROR_INVALID_POINTER = -3, - SPV_ERROR_INVALID_BINARY = -4, - SPV_ERROR_INVALID_TEXT = -5, - SPV_ERROR_INVALID_TABLE = -6, - SPV_ERROR_INVALID_VALUE = -7, - SPV_ERROR_INVALID_DIAGNOSTIC = -8, - SPV_ERROR_INVALID_LOOKUP = -9, - SPV_ERROR_INVALID_ID = -10, - SPV_ERROR_INVALID_CFG = -11, - SPV_ERROR_INVALID_LAYOUT = -12, - SPV_ERROR_INVALID_CAPABILITY = -13, - SPV_ERROR_INVALID_DATA = -14, - SPV_ERROR_MISSING_EXTENSION = -15, - SPV_ERROR_WRONG_VERSION = -16, - _spv_result_t = 2147483647, -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_message_level_t { - SPV_MSG_FATAL = 0, - 
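Looking back at the ptx_parser/src/lib.rs hunk above: a sketch of a caller for the reworked parse_for_errors, which now surfaces every lexer and parser error it collected instead of returning None on the first problem; the function name and the reporting style here are illustrative only:

fn ptx_is_clean(src: &str) -> bool {
    let errors = ptx_parser::parse_for_errors(src);
    if !errors.is_empty() {
        eprintln!("{} error(s) while parsing PTX", errors.len());
    }
    errors.is_empty()
}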
SPV_MSG_INTERNAL_ERROR = 1, - SPV_MSG_ERROR = 2, - SPV_MSG_WARNING = 3, - SPV_MSG_INFO = 4, - SPV_MSG_DEBUG = 5, -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_endianness_t { - SPV_ENDIANNESS_LITTLE = 0, - SPV_ENDIANNESS_BIG = 1, - _spv_endianness_t = 2147483647, -} -impl spv_operand_type_t { - pub const SPV_OPERAND_TYPE_FIRST_OPTIONAL_TYPE: spv_operand_type_t = - spv_operand_type_t::SPV_OPERAND_TYPE_OPTIONAL_ID; -} -impl spv_operand_type_t { - pub const SPV_OPERAND_TYPE_FIRST_VARIABLE_TYPE: spv_operand_type_t = - spv_operand_type_t::SPV_OPERAND_TYPE_VARIABLE_ID; -} -impl spv_operand_type_t { - pub const SPV_OPERAND_TYPE_LAST_VARIABLE_TYPE: spv_operand_type_t = - spv_operand_type_t::SPV_OPERAND_TYPE_VARIABLE_ID_LITERAL_INTEGER; -} -impl spv_operand_type_t { - pub const SPV_OPERAND_TYPE_LAST_OPTIONAL_TYPE: spv_operand_type_t = - spv_operand_type_t::SPV_OPERAND_TYPE_VARIABLE_ID_LITERAL_INTEGER; -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_operand_type_t { - SPV_OPERAND_TYPE_NONE = 0, - SPV_OPERAND_TYPE_ID = 1, - SPV_OPERAND_TYPE_TYPE_ID = 2, - SPV_OPERAND_TYPE_RESULT_ID = 3, - SPV_OPERAND_TYPE_MEMORY_SEMANTICS_ID = 4, - SPV_OPERAND_TYPE_SCOPE_ID = 5, - SPV_OPERAND_TYPE_LITERAL_INTEGER = 6, - SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER = 7, - SPV_OPERAND_TYPE_SPEC_CONSTANT_OP_NUMBER = 8, - SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER = 9, - SPV_OPERAND_TYPE_LITERAL_STRING = 10, - SPV_OPERAND_TYPE_SOURCE_LANGUAGE = 11, - SPV_OPERAND_TYPE_EXECUTION_MODEL = 12, - SPV_OPERAND_TYPE_ADDRESSING_MODEL = 13, - SPV_OPERAND_TYPE_MEMORY_MODEL = 14, - SPV_OPERAND_TYPE_EXECUTION_MODE = 15, - SPV_OPERAND_TYPE_STORAGE_CLASS = 16, - SPV_OPERAND_TYPE_DIMENSIONALITY = 17, - SPV_OPERAND_TYPE_SAMPLER_ADDRESSING_MODE = 18, - SPV_OPERAND_TYPE_SAMPLER_FILTER_MODE = 19, - SPV_OPERAND_TYPE_SAMPLER_IMAGE_FORMAT = 20, - SPV_OPERAND_TYPE_IMAGE_CHANNEL_ORDER = 21, - SPV_OPERAND_TYPE_IMAGE_CHANNEL_DATA_TYPE = 22, - SPV_OPERAND_TYPE_FP_ROUNDING_MODE = 23, - SPV_OPERAND_TYPE_LINKAGE_TYPE = 24, - SPV_OPERAND_TYPE_ACCESS_QUALIFIER = 25, - SPV_OPERAND_TYPE_FUNCTION_PARAMETER_ATTRIBUTE = 26, - SPV_OPERAND_TYPE_DECORATION = 27, - SPV_OPERAND_TYPE_BUILT_IN = 28, - SPV_OPERAND_TYPE_GROUP_OPERATION = 29, - SPV_OPERAND_TYPE_KERNEL_ENQ_FLAGS = 30, - SPV_OPERAND_TYPE_KERNEL_PROFILING_INFO = 31, - SPV_OPERAND_TYPE_CAPABILITY = 32, - SPV_OPERAND_TYPE_RAY_FLAGS = 33, - SPV_OPERAND_TYPE_RAY_QUERY_INTERSECTION = 34, - SPV_OPERAND_TYPE_RAY_QUERY_COMMITTED_INTERSECTION_TYPE = 35, - SPV_OPERAND_TYPE_RAY_QUERY_CANDIDATE_INTERSECTION_TYPE = 36, - SPV_OPERAND_TYPE_IMAGE = 37, - SPV_OPERAND_TYPE_FP_FAST_MATH_MODE = 38, - SPV_OPERAND_TYPE_SELECTION_CONTROL = 39, - SPV_OPERAND_TYPE_LOOP_CONTROL = 40, - SPV_OPERAND_TYPE_FUNCTION_CONTROL = 41, - SPV_OPERAND_TYPE_MEMORY_ACCESS = 42, - SPV_OPERAND_TYPE_OPTIONAL_ID = 43, - SPV_OPERAND_TYPE_OPTIONAL_IMAGE = 44, - SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS = 45, - SPV_OPERAND_TYPE_OPTIONAL_LITERAL_INTEGER = 46, - SPV_OPERAND_TYPE_OPTIONAL_LITERAL_NUMBER = 47, - SPV_OPERAND_TYPE_OPTIONAL_TYPED_LITERAL_INTEGER = 48, - SPV_OPERAND_TYPE_OPTIONAL_LITERAL_STRING = 49, - SPV_OPERAND_TYPE_OPTIONAL_ACCESS_QUALIFIER = 50, - SPV_OPERAND_TYPE_OPTIONAL_CIV = 51, - SPV_OPERAND_TYPE_VARIABLE_ID = 52, - SPV_OPERAND_TYPE_VARIABLE_LITERAL_INTEGER = 53, - SPV_OPERAND_TYPE_VARIABLE_LITERAL_INTEGER_ID = 54, - SPV_OPERAND_TYPE_VARIABLE_ID_LITERAL_INTEGER = 55, - SPV_OPERAND_TYPE_DEBUG_INFO_FLAGS = 56, - SPV_OPERAND_TYPE_DEBUG_BASE_TYPE_ATTRIBUTE_ENCODING = 57, - 
SPV_OPERAND_TYPE_DEBUG_COMPOSITE_TYPE = 58, - SPV_OPERAND_TYPE_DEBUG_TYPE_QUALIFIER = 59, - SPV_OPERAND_TYPE_DEBUG_OPERATION = 60, - SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_INFO_FLAGS = 61, - SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_BASE_TYPE_ATTRIBUTE_ENCODING = 62, - SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_COMPOSITE_TYPE = 63, - SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_TYPE_QUALIFIER = 64, - SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_OPERATION = 65, - SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_IMPORTED_ENTITY = 66, - SPV_OPERAND_TYPE_NUM_OPERAND_TYPES = 67, - _spv_operand_type_t = 2147483647, -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_ext_inst_type_t { - SPV_EXT_INST_TYPE_NONE = 0, - SPV_EXT_INST_TYPE_GLSL_STD_450 = 1, - SPV_EXT_INST_TYPE_OPENCL_STD = 2, - SPV_EXT_INST_TYPE_SPV_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER = 3, - SPV_EXT_INST_TYPE_SPV_AMD_SHADER_TRINARY_MINMAX = 4, - SPV_EXT_INST_TYPE_SPV_AMD_GCN_SHADER = 5, - SPV_EXT_INST_TYPE_SPV_AMD_SHADER_BALLOT = 6, - SPV_EXT_INST_TYPE_DEBUGINFO = 7, - SPV_EXT_INST_TYPE_OPENCL_DEBUGINFO_100 = 8, - SPV_EXT_INST_TYPE_NONSEMANTIC_UNKNOWN = 9, - _spv_ext_inst_type_t = 2147483647, -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_number_kind_t { - SPV_NUMBER_NONE = 0, - SPV_NUMBER_UNSIGNED_INT = 1, - SPV_NUMBER_SIGNED_INT = 2, - SPV_NUMBER_FLOATING = 3, -} -impl spv_text_to_binary_options_t { - pub const SPV_TEXT_TO_BINARY_OPTION_NONE: spv_text_to_binary_options_t = - spv_text_to_binary_options_t(1); -} -impl spv_text_to_binary_options_t { - pub const SPV_TEXT_TO_BINARY_OPTION_PRESERVE_NUMERIC_IDS: spv_text_to_binary_options_t = - spv_text_to_binary_options_t(2); -} -impl spv_text_to_binary_options_t { - pub const _spv_text_to_binary_options_t: spv_text_to_binary_options_t = - spv_text_to_binary_options_t(2147483647); -} -impl ::std::ops::BitOr for spv_text_to_binary_options_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - spv_text_to_binary_options_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for spv_text_to_binary_options_t { - #[inline] - fn bitor_assign(&mut self, rhs: spv_text_to_binary_options_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for spv_text_to_binary_options_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - spv_text_to_binary_options_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for spv_text_to_binary_options_t { - #[inline] - fn bitand_assign(&mut self, rhs: spv_text_to_binary_options_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct spv_text_to_binary_options_t(pub u32); -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_NONE: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(1); -} -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_PRINT: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(2); -} -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_COLOR: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(4); -} -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_INDENT: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(8); -} -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_SHOW_BYTE_OFFSET: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(16); -} -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_NO_HEADER: spv_binary_to_text_options_t = - 
spv_binary_to_text_options_t(32); -} -impl spv_binary_to_text_options_t { - pub const SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(64); -} -impl spv_binary_to_text_options_t { - pub const _spv_binary_to_text_options_t: spv_binary_to_text_options_t = - spv_binary_to_text_options_t(2147483647); -} -impl ::std::ops::BitOr for spv_binary_to_text_options_t { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - spv_binary_to_text_options_t(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for spv_binary_to_text_options_t { - #[inline] - fn bitor_assign(&mut self, rhs: spv_binary_to_text_options_t) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for spv_binary_to_text_options_t { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - spv_binary_to_text_options_t(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for spv_binary_to_text_options_t { - #[inline] - fn bitand_assign(&mut self, rhs: spv_binary_to_text_options_t) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct spv_binary_to_text_options_t(pub u32); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_parsed_operand_t { - pub offset: u16, - pub num_words: u16, - pub type_: spv_operand_type_t, - pub number_kind: spv_number_kind_t, - pub number_bit_width: u32, -} -#[test] -fn bindgen_test_layout_spv_parsed_operand_t() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(spv_parsed_operand_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(spv_parsed_operand_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_operand_t), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).num_words as *const _ as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_operand_t), - "::", - stringify!(num_words) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).type_ as *const _ as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_operand_t), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).number_kind as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_operand_t), - "::", - stringify!(number_kind) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).number_bit_width as *const _ as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_operand_t), - "::", - stringify!(number_bit_width) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_parsed_instruction_t { - pub words: *const u32, - pub num_words: u16, - pub opcode: u16, - pub ext_inst_type: spv_ext_inst_type_t, - pub type_id: u32, - pub result_id: u32, - pub operands: *const spv_parsed_operand_t, - pub num_operands: u16, -} -#[test] -fn bindgen_test_layout_spv_parsed_instruction_t() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(spv_parsed_instruction_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(spv_parsed_instruction_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).words as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - 
stringify!(words) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).num_words as *const _ as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(num_words) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).opcode as *const _ as usize }, - 10usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(opcode) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).ext_inst_type as *const _ as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(ext_inst_type) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).type_id as *const _ as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(type_id) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).result_id as *const _ as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(result_id) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).operands as *const _ as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(operands) - ) - ); - assert_eq!( - unsafe { - &(*(::std::ptr::null::())).num_operands as *const _ as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(spv_parsed_instruction_t), - "::", - stringify!(num_operands) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_const_binary_t { - pub code: *const u32, - pub wordCount: usize, -} -#[test] -fn bindgen_test_layout_spv_const_binary_t() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(spv_const_binary_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(spv_const_binary_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).code as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_const_binary_t), - "::", - stringify!(code) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).wordCount as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(spv_const_binary_t), - "::", - stringify!(wordCount) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_binary_t { - pub code: *mut u32, - pub wordCount: usize, -} -#[test] -fn bindgen_test_layout_spv_binary_t() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(spv_binary_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(spv_binary_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).code as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_binary_t), - "::", - stringify!(code) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).wordCount as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(spv_binary_t), - "::", - stringify!(wordCount) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_text_t { - pub str_: *const ::std::os::raw::c_char, - pub length: usize, -} -#[test] -fn bindgen_test_layout_spv_text_t() { - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(spv_text_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(spv_text_t)) - ); - assert_eq!( - unsafe { 
&(*(::std::ptr::null::())).str_ as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_text_t), - "::", - stringify!(str_) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).length as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(spv_text_t), - "::", - stringify!(length) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_position_t { - pub line: usize, - pub column: usize, - pub index: usize, -} -#[test] -fn bindgen_test_layout_spv_position_t() { - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(spv_position_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(spv_position_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).line as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_position_t), - "::", - stringify!(line) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).column as *const _ as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(spv_position_t), - "::", - stringify!(column) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).index as *const _ as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(spv_position_t), - "::", - stringify!(index) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_diagnostic_t { - pub position: spv_position_t, - pub error: *mut ::std::os::raw::c_char, - pub isTextSource: bool, -} -#[test] -fn bindgen_test_layout_spv_diagnostic_t() { - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(spv_diagnostic_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(spv_diagnostic_t)) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).position as *const _ as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(spv_diagnostic_t), - "::", - stringify!(position) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).error as *const _ as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(spv_diagnostic_t), - "::", - stringify!(error) - ) - ); - assert_eq!( - unsafe { &(*(::std::ptr::null::())).isTextSource as *const _ as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(spv_diagnostic_t), - "::", - stringify!(isTextSource) - ) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_context_t { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_validator_options_t { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_optimizer_options_t { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_reducer_options_t { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct spv_fuzzer_options_t { - _unused: [u8; 0], -} -pub type spv_const_binary = *mut spv_const_binary_t; -pub type spv_binary = *mut spv_binary_t; -pub type spv_text = *mut spv_text_t; -pub type spv_position = *mut spv_position_t; -pub type spv_diagnostic = *mut spv_diagnostic_t; -pub type spv_const_context = *const spv_context_t; -pub type spv_context = *mut spv_context_t; -pub type spv_validator_options = *mut spv_validator_options_t; -pub type spv_const_validator_options = *const spv_validator_options_t; -pub type spv_optimizer_options = *mut spv_optimizer_options_t; -pub type spv_const_optimizer_options = *const spv_optimizer_options_t; -pub type spv_reducer_options = *mut 
spv_reducer_options_t; -pub type spv_const_reducer_options = *const spv_reducer_options_t; -pub type spv_fuzzer_options = *mut spv_fuzzer_options_t; -pub type spv_const_fuzzer_options = *const spv_fuzzer_options_t; -extern "C" { - pub fn spvSoftwareVersionString() -> *const ::std::os::raw::c_char; -} -extern "C" { - pub fn spvSoftwareVersionDetailsString() -> *const ::std::os::raw::c_char; -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_target_env { - SPV_ENV_UNIVERSAL_1_0 = 0, - SPV_ENV_VULKAN_1_0 = 1, - SPV_ENV_UNIVERSAL_1_1 = 2, - SPV_ENV_OPENCL_2_1 = 3, - SPV_ENV_OPENCL_2_2 = 4, - SPV_ENV_OPENGL_4_0 = 5, - SPV_ENV_OPENGL_4_1 = 6, - SPV_ENV_OPENGL_4_2 = 7, - SPV_ENV_OPENGL_4_3 = 8, - SPV_ENV_OPENGL_4_5 = 9, - SPV_ENV_UNIVERSAL_1_2 = 10, - SPV_ENV_OPENCL_1_2 = 11, - SPV_ENV_OPENCL_EMBEDDED_1_2 = 12, - SPV_ENV_OPENCL_2_0 = 13, - SPV_ENV_OPENCL_EMBEDDED_2_0 = 14, - SPV_ENV_OPENCL_EMBEDDED_2_1 = 15, - SPV_ENV_OPENCL_EMBEDDED_2_2 = 16, - SPV_ENV_UNIVERSAL_1_3 = 17, - SPV_ENV_VULKAN_1_1 = 18, - SPV_ENV_WEBGPU_0 = 19, - SPV_ENV_UNIVERSAL_1_4 = 20, - SPV_ENV_VULKAN_1_1_SPIRV_1_4 = 21, - SPV_ENV_UNIVERSAL_1_5 = 22, - SPV_ENV_VULKAN_1_2 = 23, -} -#[repr(u32)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum spv_validator_limit { - spv_validator_limit_max_struct_members = 0, - spv_validator_limit_max_struct_depth = 1, - spv_validator_limit_max_local_variables = 2, - spv_validator_limit_max_global_variables = 3, - spv_validator_limit_max_switch_branches = 4, - spv_validator_limit_max_function_args = 5, - spv_validator_limit_max_control_flow_nesting_depth = 6, - spv_validator_limit_max_access_chain_indexes = 7, - spv_validator_limit_max_id_bound = 8, -} -extern "C" { - pub fn spvTargetEnvDescription(env: spv_target_env) -> *const ::std::os::raw::c_char; -} -extern "C" { - pub fn spvParseTargetEnv(s: *const ::std::os::raw::c_char, env: *mut spv_target_env) -> bool; -} -extern "C" { - pub fn spvParseVulkanEnv(vulkan_ver: u32, spirv_ver: u32, env: *mut spv_target_env) -> bool; -} -extern "C" { - pub fn spvContextCreate(env: spv_target_env) -> spv_context; -} -extern "C" { - pub fn spvContextDestroy(context: spv_context); -} -extern "C" { - pub fn spvValidatorOptionsCreate() -> spv_validator_options; -} -extern "C" { - pub fn spvValidatorOptionsDestroy(options: spv_validator_options); -} -extern "C" { - pub fn spvValidatorOptionsSetUniversalLimit( - options: spv_validator_options, - limit_type: spv_validator_limit, - limit: u32, - ); -} -extern "C" { - pub fn spvValidatorOptionsSetRelaxStoreStruct(options: spv_validator_options, val: bool); -} -extern "C" { - pub fn spvValidatorOptionsSetRelaxLogicalPointer(options: spv_validator_options, val: bool); -} -extern "C" { - pub fn spvValidatorOptionsSetBeforeHlslLegalization(options: spv_validator_options, val: bool); -} -extern "C" { - pub fn spvValidatorOptionsSetRelaxBlockLayout(options: spv_validator_options, val: bool); -} -extern "C" { - pub fn spvValidatorOptionsSetUniformBufferStandardLayout( - options: spv_validator_options, - val: bool, - ); -} -extern "C" { - pub fn spvValidatorOptionsSetScalarBlockLayout(options: spv_validator_options, val: bool); -} -extern "C" { - pub fn spvValidatorOptionsSetSkipBlockLayout(options: spv_validator_options, val: bool); -} -extern "C" { - pub fn spvOptimizerOptionsCreate() -> spv_optimizer_options; -} -extern "C" { - pub fn spvOptimizerOptionsDestroy(options: spv_optimizer_options); -} -extern "C" { - pub fn spvOptimizerOptionsSetRunValidator(options: 
spv_optimizer_options, val: bool); -} -extern "C" { - pub fn spvOptimizerOptionsSetValidatorOptions( - options: spv_optimizer_options, - val: spv_validator_options, - ); -} -extern "C" { - pub fn spvOptimizerOptionsSetMaxIdBound(options: spv_optimizer_options, val: u32); -} -extern "C" { - pub fn spvOptimizerOptionsSetPreserveBindings(options: spv_optimizer_options, val: bool); -} -extern "C" { - pub fn spvOptimizerOptionsSetPreserveSpecConstants(options: spv_optimizer_options, val: bool); -} -extern "C" { - pub fn spvReducerOptionsCreate() -> spv_reducer_options; -} -extern "C" { - pub fn spvReducerOptionsDestroy(options: spv_reducer_options); -} -extern "C" { - pub fn spvReducerOptionsSetStepLimit(options: spv_reducer_options, step_limit: u32); -} -extern "C" { - pub fn spvReducerOptionsSetFailOnValidationError( - options: spv_reducer_options, - fail_on_validation_error: bool, - ); -} -extern "C" { - pub fn spvFuzzerOptionsCreate() -> spv_fuzzer_options; -} -extern "C" { - pub fn spvFuzzerOptionsDestroy(options: spv_fuzzer_options); -} -extern "C" { - pub fn spvFuzzerOptionsEnableReplayValidation(options: spv_fuzzer_options); -} -extern "C" { - pub fn spvFuzzerOptionsSetRandomSeed(options: spv_fuzzer_options, seed: u32); -} -extern "C" { - pub fn spvFuzzerOptionsSetShrinkerStepLimit( - options: spv_fuzzer_options, - shrinker_step_limit: u32, - ); -} -extern "C" { - pub fn spvFuzzerOptionsEnableFuzzerPassValidation(options: spv_fuzzer_options); -} -extern "C" { - pub fn spvTextToBinary( - context: spv_const_context, - text: *const ::std::os::raw::c_char, - length: usize, - binary: *mut spv_binary, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} -extern "C" { - pub fn spvTextToBinaryWithOptions( - context: spv_const_context, - text: *const ::std::os::raw::c_char, - length: usize, - options: u32, - binary: *mut spv_binary, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} -extern "C" { - pub fn spvTextDestroy(text: spv_text); -} -extern "C" { - pub fn spvBinaryToText( - context: spv_const_context, - binary: *const u32, - word_count: usize, - options: u32, - text: *mut spv_text, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} -extern "C" { - pub fn spvBinaryDestroy(binary: spv_binary); -} -extern "C" { - pub fn spvValidate( - context: spv_const_context, - binary: spv_const_binary, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} -extern "C" { - pub fn spvValidateWithOptions( - context: spv_const_context, - options: spv_const_validator_options, - binary: spv_const_binary, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} -extern "C" { - pub fn spvValidateBinary( - context: spv_const_context, - words: *const u32, - num_words: usize, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} -extern "C" { - pub fn spvDiagnosticCreate( - position: spv_position, - message: *const ::std::os::raw::c_char, - ) -> spv_diagnostic; -} -extern "C" { - pub fn spvDiagnosticDestroy(diagnostic: spv_diagnostic); -} -extern "C" { - pub fn spvDiagnosticPrint(diagnostic: spv_diagnostic) -> spv_result_t; -} -extern "C" { - pub fn spvOpcodeString(opcode: u32) -> *const ::std::os::raw::c_char; -} -pub type spv_parsed_header_fn_t = ::std::option::Option< - unsafe extern "C" fn( - user_data: *mut ::std::os::raw::c_void, - endian: spv_endianness_t, - magic: u32, - version: u32, - generator: u32, - id_bound: u32, - reserved: u32, - ) -> spv_result_t, ->; -pub type spv_parsed_instruction_fn_t = ::std::option::Option< - unsafe extern "C" fn( - user_data: *mut 
::std::os::raw::c_void, - parsed_instruction: *const spv_parsed_instruction_t, - ) -> spv_result_t, ->; -extern "C" { - pub fn spvBinaryParse( - context: spv_const_context, - user_data: *mut ::std::os::raw::c_void, - words: *const u32, - num_words: usize, - parse_header: spv_parsed_header_fn_t, - parse_instruction: spv_parsed_instruction_fn_t, - diagnostic: *mut spv_diagnostic, - ) -> spv_result_t; -} diff --git a/zluda/Cargo.toml b/zluda/Cargo.toml index 837b7343..14e98457 100644 --- a/zluda/Cargo.toml +++ b/zluda/Cargo.toml @@ -2,26 +2,25 @@ name = "zluda" version = "0.0.0" authors = ["Andrzej Janik "] -edition = "2018" +edition = "2021" [lib] -name = "zluda" +name = "nvcuda" +crate-type = ["cdylib"] [dependencies] +comgr = { path = "../comgr" } +ptx_parser = { path = "../ptx_parser" } ptx = { path = "../ptx" } +cuda_types = { path = "../cuda_types" } +cuda_base = { path = "../cuda_base" } hip_runtime-sys = { path = "../ext/hip_runtime-sys" } lazy_static = "1.4" num_enum = "0.4" lz4-sys = "1.9" tempfile = "3" paste = "1.0" - -[dependencies.ocl-core] -version = "0.11" -features = ["opencl_version_1_2", "opencl_version_2_0", "opencl_version_2_1"] +rustc-hash = "1.1" [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["heapapi", "std"] } - -[dev-dependencies] -cuda-driver-sys = "0.3.0" \ No newline at end of file diff --git a/zluda/README b/zluda/README deleted file mode 100644 index f6d929ce..00000000 --- a/zluda/README +++ /dev/null @@ -1,3 +0,0 @@ -bindgen /usr/local/cuda/include/cuda.h -o cuda.rs --whitelist-function="^cu.*" --size_t-is-usize --default-enum-style=newtype --no-layout-tests --no-doc-comments --no-derive-debug --new-type-alias "^CUdevice$|^CUdeviceptr$" -sed -i -e 's/extern "C" {//g' -e 's/-> CUresult;/-> CUresult { impl_::unsupported()/g' -e 's/pub fn /#[no_mangle] pub extern "system" fn /g' cuda.rs -rustfmt cuda.rs \ No newline at end of file diff --git a/zluda/build.rs b/zluda/build.rs deleted file mode 100644 index 94c2c6f4..00000000 --- a/zluda/build.rs +++ /dev/null @@ -1,20 +0,0 @@ -use env::VarError; -use std::{env, path::PathBuf}; - -// HACK ALERT -// This is a temporary hack to to make sure that linker does not pick up -// NVIDIA OpenCL .lib using paths injected by cl-sys - -fn main() -> Result<(), VarError> { - if cfg!(windows) { - let env = env::var("CARGO_CFG_TARGET_ENV")?; - if env == "msvc" { - let mut path = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?); - path.push("lib"); - println!("cargo:rustc-link-search=native={}", path.display()); - } else { - println!("cargo:rustc-link-search=native=C:\\Windows\\System32"); - }; - } - Ok(()) -} diff --git a/zluda/src/cuda.rs b/zluda/src/cuda.rs deleted file mode 100644 index e7f5e422..00000000 --- a/zluda/src/cuda.rs +++ /dev/null @@ -1,4720 +0,0 @@ -use hip_runtime_sys::*; - -use super::r#impl; -use super::r#impl::{Decuda, Encuda}; - -/* automatically generated by rust-bindgen 0.55.1 */ - -pub type __uint32_t = ::std::os::raw::c_uint; -pub type __uint64_t = ::std::os::raw::c_ulong; -pub type cuuint32_t = u32; -pub type cuuint64_t = u64; -#[repr(transparent)] -#[derive(Copy, Clone)] -pub struct CUdeviceptr(pub usize); -#[repr(transparent)] -#[derive(Copy, Clone)] -pub struct CUdevice(pub ::std::os::raw::c_int); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUctx_st { - _unused: [u8; 0], -} -pub type CUcontext = *mut CUctx_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmod_st { - _unused: [u8; 0], -} -pub type CUmodule = *mut CUmod_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct 
CUfunc_st { - _unused: [u8; 0], -} -pub type CUfunction = *mut CUfunc_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUarray_st { - _unused: [u8; 0], -} -pub type CUarray = *mut CUarray_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmipmappedArray_st { - _unused: [u8; 0], -} -pub type CUmipmappedArray = *mut CUmipmappedArray_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUtexref_st { - _unused: [u8; 0], -} -pub type CUtexref = *mut CUtexref_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUsurfref_st { - _unused: [u8; 0], -} -pub type CUsurfref = *mut CUsurfref_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUevent_st { - _unused: [u8; 0], -} -pub type CUevent = *mut CUevent_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstream_st { - _unused: [u8; 0], -} -pub type CUstream = *mut CUstream_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraphicsResource_st { - _unused: [u8; 0], -} -pub type CUgraphicsResource = *mut CUgraphicsResource_st; -pub type CUtexObject = ::std::os::raw::c_ulonglong; -pub type CUsurfObject = ::std::os::raw::c_ulonglong; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUextMemory_st { - _unused: [u8; 0], -} -pub type CUexternalMemory = *mut CUextMemory_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUextSemaphore_st { - _unused: [u8; 0], -} -pub type CUexternalSemaphore = *mut CUextSemaphore_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraph_st { - _unused: [u8; 0], -} -pub type CUgraph = *mut CUgraph_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraphNode_st { - _unused: [u8; 0], -} -pub type CUgraphNode = *mut CUgraphNode_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUgraphExec_st { - _unused: [u8; 0], -} -pub type CUgraphExec = *mut CUgraphExec_st; -#[repr(C)] -#[derive(Copy, Clone, PartialEq, Eq)] -pub struct CUuuid_st { - pub bytes: [::std::os::raw::c_uchar; 16usize], -} -pub type CUuuid = CUuuid_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUipcEventHandle_st { - pub reserved: [::std::os::raw::c_char; 64usize], -} -pub type CUipcEventHandle = CUipcEventHandle_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUipcMemHandle_st { - pub reserved: [::std::os::raw::c_char; 64usize], -} -pub type CUipcMemHandle = CUipcMemHandle_st; -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WAIT_VALUE_32: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(1); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WRITE_VALUE_32: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(2); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WAIT_VALUE_64: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(4); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_WRITE_VALUE_64: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(5); -} -impl CUstreamBatchMemOpType_enum { - pub const CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES: CUstreamBatchMemOpType_enum = - CUstreamBatchMemOpType_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUstreamBatchMemOpType_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamBatchMemOpType_enum as CUstreamBatchMemOpType; -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamBatchMemOpParams_union { - pub operation: CUstreamBatchMemOpType, - pub waitValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st, - pub writeValue: CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st, - pub flushRemoteWrites: 
CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st, - pub pad: [cuuint64_t; 6usize], - _bindgen_union_align: [u64; 6usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st { - pub operation: CUstreamBatchMemOpType, - pub address: CUdeviceptr, - pub __bindgen_anon_1: - CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub alias: CUdeviceptr, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 { - pub value: cuuint32_t, - pub value64: cuuint64_t, - _bindgen_union_align: u64, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st { - pub operation: CUstreamBatchMemOpType, - pub address: CUdeviceptr, - pub __bindgen_anon_1: - CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub alias: CUdeviceptr, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 { - pub value: cuuint32_t, - pub value64: cuuint64_t, - _bindgen_union_align: u64, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st { - pub operation: CUstreamBatchMemOpType, - pub flags: ::std::os::raw::c_uint, -} -pub type CUstreamBatchMemOpParams = CUstreamBatchMemOpParams_union; -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNSIGNED_INT8: CUarray_format_enum = CUarray_format_enum(1); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNSIGNED_INT16: CUarray_format_enum = CUarray_format_enum(2); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_UNSIGNED_INT32: CUarray_format_enum = CUarray_format_enum(3); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SIGNED_INT8: CUarray_format_enum = CUarray_format_enum(8); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SIGNED_INT16: CUarray_format_enum = CUarray_format_enum(9); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_SIGNED_INT32: CUarray_format_enum = CUarray_format_enum(10); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_HALF: CUarray_format_enum = CUarray_format_enum(16); -} -impl CUarray_format_enum { - pub const CU_AD_FORMAT_FLOAT: CUarray_format_enum = CUarray_format_enum(32); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUarray_format_enum(pub ::std::os::raw::c_uint); -pub use self::CUarray_format_enum as CUarray_format; -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_WRAP: CUaddress_mode_enum = CUaddress_mode_enum(0); -} -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_CLAMP: CUaddress_mode_enum = CUaddress_mode_enum(1); -} -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_MIRROR: CUaddress_mode_enum = CUaddress_mode_enum(2); -} -impl CUaddress_mode_enum { - pub const CU_TR_ADDRESS_MODE_BORDER: CUaddress_mode_enum = CUaddress_mode_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUaddress_mode_enum(pub ::std::os::raw::c_uint); -pub use self::CUaddress_mode_enum as CUaddress_mode; -impl CUfilter_mode_enum { - pub const CU_TR_FILTER_MODE_POINT: CUfilter_mode_enum = CUfilter_mode_enum(0); -} -impl CUfilter_mode_enum { - pub const CU_TR_FILTER_MODE_LINEAR: CUfilter_mode_enum = CUfilter_mode_enum(1); -} -#[repr(transparent)] 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUfilter_mode_enum(pub ::std::os::raw::c_uint); -pub use self::CUfilter_mode_enum as CUfilter_mode; -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(1); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: CUdevice_attribute_enum = - CUdevice_attribute_enum(2); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: CUdevice_attribute_enum = - CUdevice_attribute_enum(3); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: CUdevice_attribute_enum = - CUdevice_attribute_enum(4); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: CUdevice_attribute_enum = - CUdevice_attribute_enum(5); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: CUdevice_attribute_enum = - CUdevice_attribute_enum(6); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: CUdevice_attribute_enum = - CUdevice_attribute_enum(7); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(8); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(8); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: CUdevice_attribute_enum = - CUdevice_attribute_enum(9); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_WARP_SIZE: CUdevice_attribute_enum = CUdevice_attribute_enum(10); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_PITCH: CUdevice_attribute_enum = CUdevice_attribute_enum(11); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(12); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(12); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CLOCK_RATE: CUdevice_attribute_enum = CUdevice_attribute_enum(13); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: CUdevice_attribute_enum = - CUdevice_attribute_enum(14); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: CUdevice_attribute_enum = - CUdevice_attribute_enum(15); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: CUdevice_attribute_enum = - CUdevice_attribute_enum(16); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: CUdevice_attribute_enum = - CUdevice_attribute_enum(17); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_INTEGRATED: CUdevice_attribute_enum = CUdevice_attribute_enum(18); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: CUdevice_attribute_enum = - CUdevice_attribute_enum(19); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: CUdevice_attribute_enum = - CUdevice_attribute_enum(20); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(21); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: CUdevice_attribute_enum = - 
CUdevice_attribute_enum(22); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(23); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(24); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(25); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(26); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(27); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(28); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(29); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(27); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(28); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES: CUdevice_attribute_enum = - CUdevice_attribute_enum(29); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT: CUdevice_attribute_enum = - CUdevice_attribute_enum(30); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: CUdevice_attribute_enum = - CUdevice_attribute_enum(31); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_ECC_ENABLED: CUdevice_attribute_enum = - CUdevice_attribute_enum(32); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: CUdevice_attribute_enum = CUdevice_attribute_enum(33); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: CUdevice_attribute_enum = - CUdevice_attribute_enum(34); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TCC_DRIVER: CUdevice_attribute_enum = CUdevice_attribute_enum(35); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(36); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(37); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: CUdevice_attribute_enum = - CUdevice_attribute_enum(38); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(39); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT: CUdevice_attribute_enum = - CUdevice_attribute_enum(40); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: CUdevice_attribute_enum = - CUdevice_attribute_enum(41); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(42); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: 
CUdevice_attribute_enum = - CUdevice_attribute_enum(43); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER: CUdevice_attribute_enum = - CUdevice_attribute_enum(44); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(45); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(46); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(47); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(48); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: CUdevice_attribute_enum = - CUdevice_attribute_enum(49); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: CUdevice_attribute_enum = - CUdevice_attribute_enum(50); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: CUdevice_attribute_enum = - CUdevice_attribute_enum(51); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(52); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(53); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(54); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(55); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(56); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(57); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(58); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(59); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(60); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(61); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(62); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(63); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(64); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(65); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: 
CUdevice_attribute_enum = - CUdevice_attribute_enum(66); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(67); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: CUdevice_attribute_enum = - CUdevice_attribute_enum(68); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(69); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(70); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(71); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: CUdevice_attribute_enum = - CUdevice_attribute_enum(72); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(73); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: CUdevice_attribute_enum = - CUdevice_attribute_enum(74); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(75); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(76); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: CUdevice_attribute_enum = - CUdevice_attribute_enum(77); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(78); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(79); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(80); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(81); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(82); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: CUdevice_attribute_enum = - CUdevice_attribute_enum(83); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: CUdevice_attribute_enum = - CUdevice_attribute_enum(84); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: CUdevice_attribute_enum = - CUdevice_attribute_enum(85); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(86); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: CUdevice_attribute_enum = - CUdevice_attribute_enum(87); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: CUdevice_attribute_enum = - CUdevice_attribute_enum(88); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: 
CUdevice_attribute_enum = - CUdevice_attribute_enum(89); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(90); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: CUdevice_attribute_enum = - CUdevice_attribute_enum(91); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS: CUdevice_attribute_enum = - CUdevice_attribute_enum(92); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS: CUdevice_attribute_enum = - CUdevice_attribute_enum(93); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(94); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH: CUdevice_attribute_enum = - CUdevice_attribute_enum(95); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH: CUdevice_attribute_enum = - CUdevice_attribute_enum(96); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: CUdevice_attribute_enum = - CUdevice_attribute_enum(97); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES: CUdevice_attribute_enum = - CUdevice_attribute_enum(98); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(99); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: - CUdevice_attribute_enum = CUdevice_attribute_enum(100); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: CUdevice_attribute_enum = - CUdevice_attribute_enum(101); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(102); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: - CUdevice_attribute_enum = CUdevice_attribute_enum(103); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(104); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(105); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: CUdevice_attribute_enum = - CUdevice_attribute_enum(106); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(107); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: CUdevice_attribute_enum = - CUdevice_attribute_enum(108); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: CUdevice_attribute_enum = - CUdevice_attribute_enum(109); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: CUdevice_attribute_enum = - CUdevice_attribute_enum(110); -} -impl CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: CUdevice_attribute_enum = - CUdevice_attribute_enum(111); -} -impl 
CUdevice_attribute_enum { - pub const CU_DEVICE_ATTRIBUTE_MAX: CUdevice_attribute_enum = CUdevice_attribute_enum(112); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUdevice_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUdevice_attribute_enum as CUdevice_attribute; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUdevprop_st { - pub maxThreadsPerBlock: ::std::os::raw::c_int, - pub maxThreadsDim: [::std::os::raw::c_int; 3usize], - pub maxGridSize: [::std::os::raw::c_int; 3usize], - pub sharedMemPerBlock: ::std::os::raw::c_int, - pub totalConstantMemory: ::std::os::raw::c_int, - pub SIMDWidth: ::std::os::raw::c_int, - pub memPitch: ::std::os::raw::c_int, - pub regsPerBlock: ::std::os::raw::c_int, - pub clockRate: ::std::os::raw::c_int, - pub textureAlign: ::std::os::raw::c_int, -} -pub type CUdevprop = CUdevprop_st; -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_CONTEXT: CUpointer_attribute_enum = CUpointer_attribute_enum(1); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_MEMORY_TYPE: CUpointer_attribute_enum = - CUpointer_attribute_enum(2); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_DEVICE_POINTER: CUpointer_attribute_enum = - CUpointer_attribute_enum(3); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_HOST_POINTER: CUpointer_attribute_enum = - CUpointer_attribute_enum(4); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_P2P_TOKENS: CUpointer_attribute_enum = - CUpointer_attribute_enum(5); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: CUpointer_attribute_enum = - CUpointer_attribute_enum(6); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_BUFFER_ID: CUpointer_attribute_enum = - CUpointer_attribute_enum(7); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_IS_MANAGED: CUpointer_attribute_enum = - CUpointer_attribute_enum(8); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: CUpointer_attribute_enum = - CUpointer_attribute_enum(9); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: CUpointer_attribute_enum = - CUpointer_attribute_enum(10); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: CUpointer_attribute_enum = - CUpointer_attribute_enum(11); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_RANGE_SIZE: CUpointer_attribute_enum = - CUpointer_attribute_enum(12); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_MAPPED: CUpointer_attribute_enum = CUpointer_attribute_enum(13); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: CUpointer_attribute_enum = - CUpointer_attribute_enum(14); -} -impl CUpointer_attribute_enum { - pub const CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE: CUpointer_attribute_enum = - CUpointer_attribute_enum(15); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUpointer_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUpointer_attribute_enum as CUpointer_attribute; -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUfunction_attribute_enum = - CUfunction_attribute_enum(0); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(1); -} -impl CUfunction_attribute_enum 
{ - pub const CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(2); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(3); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_NUM_REGS: CUfunction_attribute_enum = CUfunction_attribute_enum(4); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_PTX_VERSION: CUfunction_attribute_enum = - CUfunction_attribute_enum(5); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_BINARY_VERSION: CUfunction_attribute_enum = - CUfunction_attribute_enum(6); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: CUfunction_attribute_enum = - CUfunction_attribute_enum(7); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: CUfunction_attribute_enum = - CUfunction_attribute_enum(8); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: CUfunction_attribute_enum = - CUfunction_attribute_enum(9); -} -impl CUfunction_attribute_enum { - pub const CU_FUNC_ATTRIBUTE_MAX: CUfunction_attribute_enum = CUfunction_attribute_enum(10); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUfunction_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUfunction_attribute_enum as CUfunction_attribute; -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_NONE: CUfunc_cache_enum = CUfunc_cache_enum(0); -} -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_SHARED: CUfunc_cache_enum = CUfunc_cache_enum(1); -} -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_L1: CUfunc_cache_enum = CUfunc_cache_enum(2); -} -impl CUfunc_cache_enum { - pub const CU_FUNC_CACHE_PREFER_EQUAL: CUfunc_cache_enum = CUfunc_cache_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUfunc_cache_enum(pub ::std::os::raw::c_uint); -pub use self::CUfunc_cache_enum as CUfunc_cache; -impl CUsharedconfig_enum { - pub const CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: CUsharedconfig_enum = CUsharedconfig_enum(0); -} -impl CUsharedconfig_enum { - pub const CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: CUsharedconfig_enum = - CUsharedconfig_enum(1); -} -impl CUsharedconfig_enum { - pub const CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: CUsharedconfig_enum = - CUsharedconfig_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUsharedconfig_enum(pub ::std::os::raw::c_uint); -pub use self::CUsharedconfig_enum as CUsharedconfig; -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_HOST: CUmemorytype_enum = CUmemorytype_enum(1); -} -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_DEVICE: CUmemorytype_enum = CUmemorytype_enum(2); -} -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_ARRAY: CUmemorytype_enum = CUmemorytype_enum(3); -} -impl CUmemorytype_enum { - pub const CU_MEMORYTYPE_UNIFIED: CUmemorytype_enum = CUmemorytype_enum(4); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmemorytype_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemorytype_enum as CUmemorytype; -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_SET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(1); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_UNSET_READ_MOSTLY: CUmem_advise_enum = CUmem_advise_enum(2); -} -impl CUmem_advise_enum { - pub const 
CU_MEM_ADVISE_SET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(3); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: CUmem_advise_enum = CUmem_advise_enum(4); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_SET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(5); -} -impl CUmem_advise_enum { - pub const CU_MEM_ADVISE_UNSET_ACCESSED_BY: CUmem_advise_enum = CUmem_advise_enum(6); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmem_advise_enum(pub ::std::os::raw::c_uint); -pub use self::CUmem_advise_enum as CUmem_advise; -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(1); -} -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(2); -} -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(3); -} -impl CUmem_range_attribute_enum { - pub const CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: CUmem_range_attribute_enum = - CUmem_range_attribute_enum(4); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmem_range_attribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUmem_range_attribute_enum as CUmem_range_attribute; -impl CUjit_option_enum { - pub const CU_JIT_MAX_REGISTERS: CUjit_option_enum = CUjit_option_enum(0); -} -impl CUjit_option_enum { - pub const CU_JIT_THREADS_PER_BLOCK: CUjit_option_enum = CUjit_option_enum(1); -} -impl CUjit_option_enum { - pub const CU_JIT_WALL_TIME: CUjit_option_enum = CUjit_option_enum(2); -} -impl CUjit_option_enum { - pub const CU_JIT_INFO_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(3); -} -impl CUjit_option_enum { - pub const CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(4); -} -impl CUjit_option_enum { - pub const CU_JIT_ERROR_LOG_BUFFER: CUjit_option_enum = CUjit_option_enum(5); -} -impl CUjit_option_enum { - pub const CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = CUjit_option_enum(6); -} -impl CUjit_option_enum { - pub const CU_JIT_OPTIMIZATION_LEVEL: CUjit_option_enum = CUjit_option_enum(7); -} -impl CUjit_option_enum { - pub const CU_JIT_TARGET_FROM_CUCONTEXT: CUjit_option_enum = CUjit_option_enum(8); -} -impl CUjit_option_enum { - pub const CU_JIT_TARGET: CUjit_option_enum = CUjit_option_enum(9); -} -impl CUjit_option_enum { - pub const CU_JIT_FALLBACK_STRATEGY: CUjit_option_enum = CUjit_option_enum(10); -} -impl CUjit_option_enum { - pub const CU_JIT_GENERATE_DEBUG_INFO: CUjit_option_enum = CUjit_option_enum(11); -} -impl CUjit_option_enum { - pub const CU_JIT_LOG_VERBOSE: CUjit_option_enum = CUjit_option_enum(12); -} -impl CUjit_option_enum { - pub const CU_JIT_GENERATE_LINE_INFO: CUjit_option_enum = CUjit_option_enum(13); -} -impl CUjit_option_enum { - pub const CU_JIT_CACHE_MODE: CUjit_option_enum = CUjit_option_enum(14); -} -impl CUjit_option_enum { - pub const CU_JIT_NEW_SM3X_OPT: CUjit_option_enum = CUjit_option_enum(15); -} -impl CUjit_option_enum { - pub const CU_JIT_FAST_COMPILE: CUjit_option_enum = CUjit_option_enum(16); -} -impl CUjit_option_enum { - pub const CU_JIT_GLOBAL_SYMBOL_NAMES: CUjit_option_enum = CUjit_option_enum(17); -} -impl CUjit_option_enum { - pub const CU_JIT_GLOBAL_SYMBOL_ADDRESSES: CUjit_option_enum = CUjit_option_enum(18); -} -impl CUjit_option_enum { - pub const 
CU_JIT_GLOBAL_SYMBOL_COUNT: CUjit_option_enum = CUjit_option_enum(19); -} -impl CUjit_option_enum { - pub const CU_JIT_NUM_OPTIONS: CUjit_option_enum = CUjit_option_enum(20); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUjit_option_enum(pub ::std::os::raw::c_uint); -pub use self::CUjit_option_enum as CUjit_option; -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_CUBIN: CUjitInputType_enum = CUjitInputType_enum(0); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_PTX: CUjitInputType_enum = CUjitInputType_enum(1); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_FATBINARY: CUjitInputType_enum = CUjitInputType_enum(2); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_OBJECT: CUjitInputType_enum = CUjitInputType_enum(3); -} -impl CUjitInputType_enum { - pub const CU_JIT_INPUT_LIBRARY: CUjitInputType_enum = CUjitInputType_enum(4); -} -impl CUjitInputType_enum { - pub const CU_JIT_NUM_INPUT_TYPES: CUjitInputType_enum = CUjitInputType_enum(5); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUjitInputType_enum(pub ::std::os::raw::c_uint); -pub use self::CUjitInputType_enum as CUjitInputType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUlinkState_st { - _unused: [u8; 0], -} -pub type CUlinkState = *mut CUlinkState_st; -impl CUlimit_enum { - pub const CU_LIMIT_STACK_SIZE: CUlimit_enum = CUlimit_enum(0); -} -impl CUlimit_enum { - pub const CU_LIMIT_PRINTF_FIFO_SIZE: CUlimit_enum = CUlimit_enum(1); -} -impl CUlimit_enum { - pub const CU_LIMIT_MALLOC_HEAP_SIZE: CUlimit_enum = CUlimit_enum(2); -} -impl CUlimit_enum { - pub const CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: CUlimit_enum = CUlimit_enum(3); -} -impl CUlimit_enum { - pub const CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: CUlimit_enum = CUlimit_enum(4); -} -impl CUlimit_enum { - pub const CU_LIMIT_MAX_L2_FETCH_GRANULARITY: CUlimit_enum = CUlimit_enum(5); -} -impl CUlimit_enum { - pub const CU_LIMIT_PERSISTING_L2_CACHE_SIZE: CUlimit_enum = CUlimit_enum(6); -} -impl CUlimit_enum { - pub const CU_LIMIT_MAX: CUlimit_enum = CUlimit_enum(7); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUlimit_enum(pub ::std::os::raw::c_uint); -pub use self::CUlimit_enum as CUlimit; -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_ARRAY: CUresourcetype_enum = CUresourcetype_enum(0); -} -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: CUresourcetype_enum = CUresourcetype_enum(1); -} -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_LINEAR: CUresourcetype_enum = CUresourcetype_enum(2); -} -impl CUresourcetype_enum { - pub const CU_RESOURCE_TYPE_PITCH2D: CUresourcetype_enum = CUresourcetype_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUresourcetype_enum(pub ::std::os::raw::c_uint); -pub use self::CUresourcetype_enum as CUresourcetype; -pub type CUhostFn = - ::std::option::Option; -impl CUaccessProperty_enum { - pub const CU_ACCESS_PROPERTY_NORMAL: CUaccessProperty_enum = CUaccessProperty_enum(0); -} -impl CUaccessProperty_enum { - pub const CU_ACCESS_PROPERTY_STREAMING: CUaccessProperty_enum = CUaccessProperty_enum(1); -} -impl CUaccessProperty_enum { - pub const CU_ACCESS_PROPERTY_PERSISTING: CUaccessProperty_enum = CUaccessProperty_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUaccessProperty_enum(pub ::std::os::raw::c_uint); -pub use self::CUaccessProperty_enum as 
CUaccessProperty; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUaccessPolicyWindow_st { - pub base_ptr: *mut ::std::os::raw::c_void, - pub num_bytes: usize, - pub hitRatio: f32, - pub hitProp: CUaccessProperty, - pub missProp: CUaccessProperty, -} -pub type CUaccessPolicyWindow = CUaccessPolicyWindow_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_KERNEL_NODE_PARAMS_st { - pub func: CUfunction, - pub gridDimX: ::std::os::raw::c_uint, - pub gridDimY: ::std::os::raw::c_uint, - pub gridDimZ: ::std::os::raw::c_uint, - pub blockDimX: ::std::os::raw::c_uint, - pub blockDimY: ::std::os::raw::c_uint, - pub blockDimZ: ::std::os::raw::c_uint, - pub sharedMemBytes: ::std::os::raw::c_uint, - pub kernelParams: *mut *mut ::std::os::raw::c_void, - pub extra: *mut *mut ::std::os::raw::c_void, -} -pub type CUDA_KERNEL_NODE_PARAMS = CUDA_KERNEL_NODE_PARAMS_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_MEMSET_NODE_PARAMS_st { - pub dst: CUdeviceptr, - pub pitch: usize, - pub value: ::std::os::raw::c_uint, - pub elementSize: ::std::os::raw::c_uint, - pub width: usize, - pub height: usize, -} -pub type CUDA_MEMSET_NODE_PARAMS = CUDA_MEMSET_NODE_PARAMS_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_HOST_NODE_PARAMS_st { - pub fn_: CUhostFn, - pub userData: *mut ::std::os::raw::c_void, -} -pub type CUDA_HOST_NODE_PARAMS = CUDA_HOST_NODE_PARAMS_st; -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_KERNEL: CUgraphNodeType_enum = CUgraphNodeType_enum(0); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_MEMCPY: CUgraphNodeType_enum = CUgraphNodeType_enum(1); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_MEMSET: CUgraphNodeType_enum = CUgraphNodeType_enum(2); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_HOST: CUgraphNodeType_enum = CUgraphNodeType_enum(3); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_GRAPH: CUgraphNodeType_enum = CUgraphNodeType_enum(4); -} -impl CUgraphNodeType_enum { - pub const CU_GRAPH_NODE_TYPE_EMPTY: CUgraphNodeType_enum = CUgraphNodeType_enum(5); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUgraphNodeType_enum(pub ::std::os::raw::c_uint); -pub use self::CUgraphNodeType_enum as CUgraphNodeType; -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_AUTO: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(1); -} -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_SPIN: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(2); -} -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_YIELD: CUsynchronizationPolicy_enum = CUsynchronizationPolicy_enum(3); -} -impl CUsynchronizationPolicy_enum { - pub const CU_SYNC_POLICY_BLOCKING_SYNC: CUsynchronizationPolicy_enum = - CUsynchronizationPolicy_enum(4); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUsynchronizationPolicy_enum(pub ::std::os::raw::c_uint); -pub use self::CUsynchronizationPolicy_enum as CUsynchronizationPolicy; -impl CUkernelNodeAttrID_enum { - pub const CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUkernelNodeAttrID_enum = - CUkernelNodeAttrID_enum(1); -} -impl CUkernelNodeAttrID_enum { - pub const CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE: CUkernelNodeAttrID_enum = - CUkernelNodeAttrID_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUkernelNodeAttrID_enum(pub ::std::os::raw::c_uint); -pub use self::CUkernelNodeAttrID_enum as CUkernelNodeAttrID; 
-#[repr(C)] -#[derive(Copy, Clone)] -pub union CUkernelNodeAttrValue_union { - pub accessPolicyWindow: CUaccessPolicyWindow, - pub cooperative: ::std::os::raw::c_int, - _bindgen_union_align: [u64; 4usize], -} -pub type CUkernelNodeAttrValue = CUkernelNodeAttrValue_union; -impl CUstreamCaptureStatus_enum { - pub const CU_STREAM_CAPTURE_STATUS_NONE: CUstreamCaptureStatus_enum = - CUstreamCaptureStatus_enum(0); -} -impl CUstreamCaptureStatus_enum { - pub const CU_STREAM_CAPTURE_STATUS_ACTIVE: CUstreamCaptureStatus_enum = - CUstreamCaptureStatus_enum(1); -} -impl CUstreamCaptureStatus_enum { - pub const CU_STREAM_CAPTURE_STATUS_INVALIDATED: CUstreamCaptureStatus_enum = - CUstreamCaptureStatus_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUstreamCaptureStatus_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamCaptureStatus_enum as CUstreamCaptureStatus; -impl CUstreamCaptureMode_enum { - pub const CU_STREAM_CAPTURE_MODE_GLOBAL: CUstreamCaptureMode_enum = CUstreamCaptureMode_enum(0); -} -impl CUstreamCaptureMode_enum { - pub const CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: CUstreamCaptureMode_enum = - CUstreamCaptureMode_enum(1); -} -impl CUstreamCaptureMode_enum { - pub const CU_STREAM_CAPTURE_MODE_RELAXED: CUstreamCaptureMode_enum = - CUstreamCaptureMode_enum(2); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUstreamCaptureMode_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamCaptureMode_enum as CUstreamCaptureMode; -impl CUstreamAttrID_enum { - pub const CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW: CUstreamAttrID_enum = - CUstreamAttrID_enum(1); -} -impl CUstreamAttrID_enum { - pub const CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY: CUstreamAttrID_enum = - CUstreamAttrID_enum(3); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUstreamAttrID_enum(pub ::std::os::raw::c_uint); -pub use self::CUstreamAttrID_enum as CUstreamAttrID; -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUstreamAttrValue_union { - pub accessPolicyWindow: CUaccessPolicyWindow, - pub syncPolicy: CUsynchronizationPolicy, - _bindgen_union_align: [u64; 4usize], -} -pub type CUstreamAttrValue = CUstreamAttrValue_union; -impl cudaError_enum { - pub const CUDA_SUCCESS: cudaError_enum = cudaError_enum(0); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_VALUE: cudaError_enum = cudaError_enum(1); -} -impl cudaError_enum { - pub const CUDA_ERROR_OUT_OF_MEMORY: cudaError_enum = cudaError_enum(2); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_INITIALIZED: cudaError_enum = cudaError_enum(3); -} -impl cudaError_enum { - pub const CUDA_ERROR_DEINITIALIZED: cudaError_enum = cudaError_enum(4); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_DISABLED: cudaError_enum = cudaError_enum(5); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_NOT_INITIALIZED: cudaError_enum = cudaError_enum(6); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_ALREADY_STARTED: cudaError_enum = cudaError_enum(7); -} -impl cudaError_enum { - pub const CUDA_ERROR_PROFILER_ALREADY_STOPPED: cudaError_enum = cudaError_enum(8); -} -impl cudaError_enum { - pub const CUDA_ERROR_NO_DEVICE: cudaError_enum = cudaError_enum(100); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_DEVICE: cudaError_enum = cudaError_enum(101); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_IMAGE: cudaError_enum = cudaError_enum(200); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_CONTEXT: 
cudaError_enum = cudaError_enum(201); -} -impl cudaError_enum { - pub const CUDA_ERROR_CONTEXT_ALREADY_CURRENT: cudaError_enum = cudaError_enum(202); -} -impl cudaError_enum { - pub const CUDA_ERROR_MAP_FAILED: cudaError_enum = cudaError_enum(205); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNMAP_FAILED: cudaError_enum = cudaError_enum(206); -} -impl cudaError_enum { - pub const CUDA_ERROR_ARRAY_IS_MAPPED: cudaError_enum = cudaError_enum(207); -} -impl cudaError_enum { - pub const CUDA_ERROR_ALREADY_MAPPED: cudaError_enum = cudaError_enum(208); -} -impl cudaError_enum { - pub const CUDA_ERROR_NO_BINARY_FOR_GPU: cudaError_enum = cudaError_enum(209); -} -impl cudaError_enum { - pub const CUDA_ERROR_ALREADY_ACQUIRED: cudaError_enum = cudaError_enum(210); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_MAPPED: cudaError_enum = cudaError_enum(211); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_MAPPED_AS_ARRAY: cudaError_enum = cudaError_enum(212); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_MAPPED_AS_POINTER: cudaError_enum = cudaError_enum(213); -} -impl cudaError_enum { - pub const CUDA_ERROR_ECC_UNCORRECTABLE: cudaError_enum = cudaError_enum(214); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNSUPPORTED_LIMIT: cudaError_enum = cudaError_enum(215); -} -impl cudaError_enum { - pub const CUDA_ERROR_CONTEXT_ALREADY_IN_USE: cudaError_enum = cudaError_enum(216); -} -impl cudaError_enum { - pub const CUDA_ERROR_PEER_ACCESS_UNSUPPORTED: cudaError_enum = cudaError_enum(217); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_PTX: cudaError_enum = cudaError_enum(218); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_GRAPHICS_CONTEXT: cudaError_enum = cudaError_enum(219); -} -impl cudaError_enum { - pub const CUDA_ERROR_NVLINK_UNCORRECTABLE: cudaError_enum = cudaError_enum(220); -} -impl cudaError_enum { - pub const CUDA_ERROR_JIT_COMPILER_NOT_FOUND: cudaError_enum = cudaError_enum(221); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_SOURCE: cudaError_enum = cudaError_enum(300); -} -impl cudaError_enum { - pub const CUDA_ERROR_FILE_NOT_FOUND: cudaError_enum = cudaError_enum(301); -} -impl cudaError_enum { - pub const CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: cudaError_enum = cudaError_enum(302); -} -impl cudaError_enum { - pub const CUDA_ERROR_SHARED_OBJECT_INIT_FAILED: cudaError_enum = cudaError_enum(303); -} -impl cudaError_enum { - pub const CUDA_ERROR_OPERATING_SYSTEM: cudaError_enum = cudaError_enum(304); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_HANDLE: cudaError_enum = cudaError_enum(400); -} -impl cudaError_enum { - pub const CUDA_ERROR_ILLEGAL_STATE: cudaError_enum = cudaError_enum(401); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_FOUND: cudaError_enum = cudaError_enum(500); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_READY: cudaError_enum = cudaError_enum(600); -} -impl cudaError_enum { - pub const CUDA_ERROR_ILLEGAL_ADDRESS: cudaError_enum = cudaError_enum(700); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: cudaError_enum = cudaError_enum(701); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_TIMEOUT: cudaError_enum = cudaError_enum(702); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: cudaError_enum = cudaError_enum(703); -} -impl cudaError_enum { - pub const CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED: cudaError_enum = cudaError_enum(704); -} -impl cudaError_enum { - pub const CUDA_ERROR_PEER_ACCESS_NOT_ENABLED: cudaError_enum = 
cudaError_enum(705); -} -impl cudaError_enum { - pub const CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE: cudaError_enum = cudaError_enum(708); -} -impl cudaError_enum { - pub const CUDA_ERROR_CONTEXT_IS_DESTROYED: cudaError_enum = cudaError_enum(709); -} -impl cudaError_enum { - pub const CUDA_ERROR_ASSERT: cudaError_enum = cudaError_enum(710); -} -impl cudaError_enum { - pub const CUDA_ERROR_TOO_MANY_PEERS: cudaError_enum = cudaError_enum(711); -} -impl cudaError_enum { - pub const CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED: cudaError_enum = cudaError_enum(712); -} -impl cudaError_enum { - pub const CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED: cudaError_enum = cudaError_enum(713); -} -impl cudaError_enum { - pub const CUDA_ERROR_HARDWARE_STACK_ERROR: cudaError_enum = cudaError_enum(714); -} -impl cudaError_enum { - pub const CUDA_ERROR_ILLEGAL_INSTRUCTION: cudaError_enum = cudaError_enum(715); -} -impl cudaError_enum { - pub const CUDA_ERROR_MISALIGNED_ADDRESS: cudaError_enum = cudaError_enum(716); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_ADDRESS_SPACE: cudaError_enum = cudaError_enum(717); -} -impl cudaError_enum { - pub const CUDA_ERROR_INVALID_PC: cudaError_enum = cudaError_enum(718); -} -impl cudaError_enum { - pub const CUDA_ERROR_LAUNCH_FAILED: cudaError_enum = cudaError_enum(719); -} -impl cudaError_enum { - pub const CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE: cudaError_enum = cudaError_enum(720); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_PERMITTED: cudaError_enum = cudaError_enum(800); -} -impl cudaError_enum { - pub const CUDA_ERROR_NOT_SUPPORTED: cudaError_enum = cudaError_enum(801); -} -impl cudaError_enum { - pub const CUDA_ERROR_SYSTEM_NOT_READY: cudaError_enum = cudaError_enum(802); -} -impl cudaError_enum { - pub const CUDA_ERROR_SYSTEM_DRIVER_MISMATCH: cudaError_enum = cudaError_enum(803); -} -impl cudaError_enum { - pub const CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: cudaError_enum = cudaError_enum(804); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED: cudaError_enum = cudaError_enum(900); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_INVALIDATED: cudaError_enum = cudaError_enum(901); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_MERGE: cudaError_enum = cudaError_enum(902); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_UNMATCHED: cudaError_enum = cudaError_enum(903); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_UNJOINED: cudaError_enum = cudaError_enum(904); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_ISOLATION: cudaError_enum = cudaError_enum(905); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_IMPLICIT: cudaError_enum = cudaError_enum(906); -} -impl cudaError_enum { - pub const CUDA_ERROR_CAPTURED_EVENT: cudaError_enum = cudaError_enum(907); -} -impl cudaError_enum { - pub const CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD: cudaError_enum = cudaError_enum(908); -} -impl cudaError_enum { - pub const CUDA_ERROR_TIMEOUT: cudaError_enum = cudaError_enum(909); -} -impl cudaError_enum { - pub const CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE: cudaError_enum = cudaError_enum(910); -} -impl cudaError_enum { - pub const CUDA_ERROR_UNKNOWN: cudaError_enum = cudaError_enum(999); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct cudaError_enum(pub ::std::os::raw::c_uint); -pub use self::cudaError_enum as CUresult; -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: 
CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(1); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(2); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(3); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(4); -} -impl CUdevice_P2PAttribute_enum { - pub const CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: CUdevice_P2PAttribute_enum = - CUdevice_P2PAttribute_enum(4); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUdevice_P2PAttribute_enum(pub ::std::os::raw::c_uint); -pub use self::CUdevice_P2PAttribute_enum as CUdevice_P2PAttribute; -pub type CUstreamCallback = ::std::option::Option< - unsafe extern "system" fn( - hStream: CUstream, - status: CUresult, - userData: *mut ::std::os::raw::c_void, - ), ->; -pub type CUoccupancyB2DSize = - ::std::option::Option usize>; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_MEMCPY2D_st { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr, - pub srcArray: CUarray, - pub srcPitch: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr, - pub dstArray: CUarray, - pub dstPitch: usize, - pub WidthInBytes: usize, - pub Height: usize, -} -pub type CUDA_MEMCPY2D = CUDA_MEMCPY2D_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_MEMCPY3D_st { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcZ: usize, - pub srcLOD: usize, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr, - pub srcArray: CUarray, - pub reserved0: *mut ::std::os::raw::c_void, - pub srcPitch: usize, - pub srcHeight: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstZ: usize, - pub dstLOD: usize, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr, - pub dstArray: CUarray, - pub reserved1: *mut ::std::os::raw::c_void, - pub dstPitch: usize, - pub dstHeight: usize, - pub WidthInBytes: usize, - pub Height: usize, - pub Depth: usize, -} -pub type CUDA_MEMCPY3D = CUDA_MEMCPY3D_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_MEMCPY3D_PEER_st { - pub srcXInBytes: usize, - pub srcY: usize, - pub srcZ: usize, - pub srcLOD: usize, - pub srcMemoryType: CUmemorytype, - pub srcHost: *const ::std::os::raw::c_void, - pub srcDevice: CUdeviceptr, - pub srcArray: CUarray, - pub srcContext: CUcontext, - pub srcPitch: usize, - pub srcHeight: usize, - pub dstXInBytes: usize, - pub dstY: usize, - pub dstZ: usize, - pub dstLOD: usize, - pub dstMemoryType: CUmemorytype, - pub dstHost: *mut ::std::os::raw::c_void, - pub dstDevice: CUdeviceptr, - pub dstArray: CUarray, - pub dstContext: CUcontext, - pub dstPitch: usize, - pub dstHeight: usize, - pub WidthInBytes: usize, - pub Height: usize, - pub Depth: usize, -} -pub type CUDA_MEMCPY3D_PEER = CUDA_MEMCPY3D_PEER_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_ARRAY_DESCRIPTOR_st { - pub Width: usize, - pub Height: usize, - pub Format: CUarray_format, - pub NumChannels: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY_DESCRIPTOR = 
CUDA_ARRAY_DESCRIPTOR_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_ARRAY3D_DESCRIPTOR_st { - pub Width: usize, - pub Height: usize, - pub Depth: usize, - pub Format: CUarray_format, - pub NumChannels: ::std::os::raw::c_uint, - pub Flags: ::std::os::raw::c_uint, -} -pub type CUDA_ARRAY3D_DESCRIPTOR = CUDA_ARRAY3D_DESCRIPTOR_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st { - pub resType: CUresourcetype, - pub res: CUDA_RESOURCE_DESC_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_RESOURCE_DESC_st__bindgen_ty_1 { - pub array: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub mipmap: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2, - pub linear: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3, - pub pitch2D: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4, - pub reserved: CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5, - _bindgen_union_align: [u64; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - pub hArray: CUarray, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { - pub hMipmappedArray: CUmipmappedArray, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { - pub devPtr: CUdeviceptr, - pub format: CUarray_format, - pub numChannels: ::std::os::raw::c_uint, - pub sizeInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { - pub devPtr: CUdeviceptr, - pub format: CUarray_format, - pub numChannels: ::std::os::raw::c_uint, - pub width: usize, - pub height: usize, - pub pitchInBytes: usize, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 { - pub reserved: [::std::os::raw::c_int; 32usize], -} -pub type CUDA_RESOURCE_DESC = CUDA_RESOURCE_DESC_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_TEXTURE_DESC_st { - pub addressMode: [CUaddress_mode; 3usize], - pub filterMode: CUfilter_mode, - pub flags: ::std::os::raw::c_uint, - pub maxAnisotropy: ::std::os::raw::c_uint, - pub mipmapFilterMode: CUfilter_mode, - pub mipmapLevelBias: f32, - pub minMipmapLevelClamp: f32, - pub maxMipmapLevelClamp: f32, - pub borderColor: [f32; 4usize], - pub reserved: [::std::os::raw::c_int; 12usize], -} -pub type CUDA_TEXTURE_DESC = CUDA_TEXTURE_DESC_st; -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_NONE: CUresourceViewFormat_enum = CUresourceViewFormat_enum(0); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(1); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(2); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(3); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_1X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(4); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_2X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(5); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_4X8: CUresourceViewFormat_enum = CUresourceViewFormat_enum(6); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_1X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(7); -} 
-impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_2X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(8); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_4X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(9); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_1X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(10); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_2X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(11); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_4X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(12); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_1X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(13); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_2X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(14); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UINT_4X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(15); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_1X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(16); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_2X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(17); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SINT_4X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(18); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_1X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(19); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_2X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(20); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_4X16: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(21); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_1X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(22); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_2X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(23); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_FLOAT_4X32: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(24); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC1: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(25); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC2: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(26); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC3: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(27); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC4: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(28); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SIGNED_BC4: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(29); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC5: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(30); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SIGNED_BC5: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(31); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC6H: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(32); -} -impl 
CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_SIGNED_BC6H: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(33); -} -impl CUresourceViewFormat_enum { - pub const CU_RES_VIEW_FORMAT_UNSIGNED_BC7: CUresourceViewFormat_enum = - CUresourceViewFormat_enum(34); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUresourceViewFormat_enum(pub ::std::os::raw::c_uint); -pub use self::CUresourceViewFormat_enum as CUresourceViewFormat; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_RESOURCE_VIEW_DESC_st { - pub format: CUresourceViewFormat, - pub width: usize, - pub height: usize, - pub depth: usize, - pub firstMipmapLevel: ::std::os::raw::c_uint, - pub lastMipmapLevel: ::std::os::raw::c_uint, - pub firstLayer: ::std::os::raw::c_uint, - pub lastLayer: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type CUDA_RESOURCE_VIEW_DESC = CUDA_RESOURCE_VIEW_DESC_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_LAUNCH_PARAMS_st { - pub function: CUfunction, - pub gridDimX: ::std::os::raw::c_uint, - pub gridDimY: ::std::os::raw::c_uint, - pub gridDimZ: ::std::os::raw::c_uint, - pub blockDimX: ::std::os::raw::c_uint, - pub blockDimY: ::std::os::raw::c_uint, - pub blockDimZ: ::std::os::raw::c_uint, - pub sharedMemBytes: ::std::os::raw::c_uint, - pub hStream: CUstream, - pub kernelParams: *mut *mut ::std::os::raw::c_void, -} -pub type CUDA_LAUNCH_PARAMS = CUDA_LAUNCH_PARAMS_st; -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(1); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(2); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(3); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(4); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(5); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(6); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(7); -} -impl CUexternalMemoryHandleType_enum { - pub const CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF: CUexternalMemoryHandleType_enum = - CUexternalMemoryHandleType_enum(8); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUexternalMemoryHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUexternalMemoryHandleType_enum as CUexternalMemoryHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { - pub type_: CUexternalMemoryHandleType, - pub handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1, - pub size: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 { - pub fd: ::std::os::raw::c_int, - pub 
win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciBufObject: *const ::std::os::raw::c_void, - _bindgen_union_align: [u64; 2usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - pub handle: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_void, -} -pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { - pub offset: ::std::os::raw::c_ulonglong, - pub size: ::std::os::raw::c_ulonglong, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { - pub offset: ::std::os::raw::c_ulonglong, - pub arrayDesc: CUDA_ARRAY3D_DESCRIPTOR, - pub numLevels: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st; -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(1); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(2); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(3); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(4); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(5); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC: CUexternalSemaphoreHandleType_enum = - CUexternalSemaphoreHandleType_enum(6); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(7); -} -impl CUexternalSemaphoreHandleType_enum { - pub const CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT: - CUexternalSemaphoreHandleType_enum = CUexternalSemaphoreHandleType_enum(8); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUexternalSemaphoreHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUexternalSemaphoreHandleType_enum as CUexternalSemaphoreHandleType; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { - pub type_: CUexternalSemaphoreHandleType, - pub handle: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1 { - pub fd: ::std::os::raw::c_int, - pub win32: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSyncObj: *const ::std::os::raw::c_void, - _bindgen_union_align: [u64; 2usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct 
CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - pub handle: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_void, -} -pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { - pub params: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 { - pub fence: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2, - pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3, - pub reserved: [::std::os::raw::c_uint; 12usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { - pub value: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_2 { - pub fence: *mut ::std::os::raw::c_void, - pub reserved: ::std::os::raw::c_ulonglong, - _bindgen_union_align: u64, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { - pub key: ::std::os::raw::c_ulonglong, -} -pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { - pub params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1, - pub flags: ::std::os::raw::c_uint, - pub reserved: [::std::os::raw::c_uint; 16usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 { - pub fence: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1, - pub nvSciSync: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2, - pub keyedMutex: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3, - pub reserved: [::std::os::raw::c_uint; 10usize], -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { - pub value: ::std::os::raw::c_ulonglong, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_2 { - pub fence: *mut ::std::os::raw::c_void, - pub reserved: ::std::os::raw::c_ulonglong, - _bindgen_union_align: u64, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { - pub key: ::std::os::raw::c_ulonglong, - pub timeoutMs: ::std::os::raw::c_uint, -} -pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st; -pub type CUmemGenericAllocationHandle = ::std::os::raw::c_ulonglong; -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(1); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_WIN32: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(2); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_WIN32_KMT: CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(4); -} -impl CUmemAllocationHandleType_enum { - pub const CU_MEM_HANDLE_TYPE_MAX: 
CUmemAllocationHandleType_enum = - CUmemAllocationHandleType_enum(4294967295); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmemAllocationHandleType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAllocationHandleType_enum as CUmemAllocationHandleType; -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_NONE: CUmemAccess_flags_enum = CUmemAccess_flags_enum(0); -} -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_READ: CUmemAccess_flags_enum = CUmemAccess_flags_enum(1); -} -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_READWRITE: CUmemAccess_flags_enum = - CUmemAccess_flags_enum(3); -} -impl CUmemAccess_flags_enum { - pub const CU_MEM_ACCESS_FLAGS_PROT_MAX: CUmemAccess_flags_enum = - CUmemAccess_flags_enum(4294967295); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmemAccess_flags_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAccess_flags_enum as CUmemAccess_flags; -impl CUmemLocationType_enum { - pub const CU_MEM_LOCATION_TYPE_INVALID: CUmemLocationType_enum = CUmemLocationType_enum(0); -} -impl CUmemLocationType_enum { - pub const CU_MEM_LOCATION_TYPE_DEVICE: CUmemLocationType_enum = CUmemLocationType_enum(1); -} -impl CUmemLocationType_enum { - pub const CU_MEM_LOCATION_TYPE_MAX: CUmemLocationType_enum = CUmemLocationType_enum(4294967295); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmemLocationType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemLocationType_enum as CUmemLocationType; -impl CUmemAllocationType_enum { - pub const CU_MEM_ALLOCATION_TYPE_INVALID: CUmemAllocationType_enum = - CUmemAllocationType_enum(0); -} -impl CUmemAllocationType_enum { - pub const CU_MEM_ALLOCATION_TYPE_PINNED: CUmemAllocationType_enum = CUmemAllocationType_enum(1); -} -impl CUmemAllocationType_enum { - pub const CU_MEM_ALLOCATION_TYPE_MAX: CUmemAllocationType_enum = - CUmemAllocationType_enum(4294967295); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmemAllocationType_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAllocationType_enum as CUmemAllocationType; -impl CUmemAllocationGranularity_flags_enum { - pub const CU_MEM_ALLOC_GRANULARITY_MINIMUM: CUmemAllocationGranularity_flags_enum = - CUmemAllocationGranularity_flags_enum(0); -} -impl CUmemAllocationGranularity_flags_enum { - pub const CU_MEM_ALLOC_GRANULARITY_RECOMMENDED: CUmemAllocationGranularity_flags_enum = - CUmemAllocationGranularity_flags_enum(1); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUmemAllocationGranularity_flags_enum(pub ::std::os::raw::c_uint); -pub use self::CUmemAllocationGranularity_flags_enum as CUmemAllocationGranularity_flags; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmemLocation_st { - pub type_: CUmemLocationType, - pub id: ::std::os::raw::c_int, -} -pub type CUmemLocation = CUmemLocation_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmemAllocationProp_st { - pub type_: CUmemAllocationType, - pub requestedHandleTypes: CUmemAllocationHandleType, - pub location: CUmemLocation, - pub win32HandleMetaData: *mut ::std::os::raw::c_void, - pub allocFlags: CUmemAllocationProp_st__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmemAllocationProp_st__bindgen_ty_1 { - pub compressionType: ::std::os::raw::c_uchar, - pub gpuDirectRDMACapable: ::std::os::raw::c_uchar, - pub reserved: 
[::std::os::raw::c_uchar; 6usize], -} -pub type CUmemAllocationProp = CUmemAllocationProp_st; -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CUmemAccessDesc_st { - pub location: CUmemLocation, - pub flags: CUmemAccess_flags, -} -pub type CUmemAccessDesc = CUmemAccessDesc_st; -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_SUCCESS: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(0); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(1); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(2); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(3); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(4); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(5); -} -impl CUgraphExecUpdateResult_enum { - pub const CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED: CUgraphExecUpdateResult_enum = - CUgraphExecUpdateResult_enum(6); -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CUgraphExecUpdateResult_enum(pub ::std::os::raw::c_uint); -pub use self::CUgraphExecUpdateResult_enum as CUgraphExecUpdateResult; - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuGetErrorString( - CUresult(e): CUresult, - pStr: *mut *const ::std::os::raw::c_char, -) -> CUresult { - *pStr = hipGetErrorString(hipError_t(e)); - CUresult::CUDA_SUCCESS -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGetErrorName( - error: CUresult, - pStr: *mut *const ::std::os::raw::c_char, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuInit(Flags: ::std::os::raw::c_uint) -> CUresult { - unsafe { hipInit(Flags).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDriverGetVersion(driverVersion: *mut ::std::os::raw::c_int) -> CUresult { - // GeekBench checks this value - // TODO: encode something more sensible - unsafe { *driverVersion = r#impl::driver_get_version() }; - CUresult::CUDA_SUCCESS -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGet( - device: *mut CUdevice, - ordinal: ::std::os::raw::c_int, -) -> CUresult { - unsafe { hipDeviceGet(device as _, ordinal).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetCount(count: *mut ::std::os::raw::c_int) -> CUresult { - unsafe { hipGetDeviceCount(count).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetName( - name: *mut ::std::os::raw::c_char, - len: ::std::os::raw::c_int, - CUdevice(dev): CUdevice, -) -> CUresult { - unsafe { hipDeviceGetName(name, len, dev).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetUuid(uuid: *mut CUuuid, dev: CUdevice) -> CUresult { - r#impl::device::get_uuid(uuid, dev.0).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetLuid( - luid: *mut ::std::os::raw::c_char, - deviceNodeMask: *mut ::std::os::raw::c_uint, - dev: CUdevice, -) -> CUresult { - r#impl::device::get_luid(luid, deviceNodeMask, dev.0).encuda() -} - 
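The exports above follow one pattern: each `cu*` entry point either forwards to the matching `hip*` call and turns the returned `hipError_t` into a `CUresult` via `.into()`, or reports `r#impl::unimplemented()`. A minimal sketch of what that conversion amounts to, assuming the hip_runtime-sys bindings use the same `#[repr(transparent)]` newtype style visible in `hipGetErrorString(hipError_t(e))`; `hip_to_cuda` is a hypothetical helper name, and the crate's real `Into` impl maps individual codes rather than collapsing them:

fn hip_to_cuda(error: hipError_t) -> CUresult {
    match error.0 {
        // hipSuccess
        0 => CUresult::CUDA_SUCCESS,
        // The real conversion translates each HIP code to its CUDA counterpart;
        // this sketch collapses everything else into a generic failure.
        _ => CUresult::CUDA_ERROR_UNKNOWN,
    }
}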
-#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceTotalMem_v2(bytes: *mut usize, CUdevice(dev): CUdevice) -> CUresult { - unsafe { hipDeviceTotalMem(bytes, dev).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetAttribute( - pi: *mut ::std::os::raw::c_int, - attrib: CUdevice_attribute, - CUdevice(dev): CUdevice, -) -> CUresult { - r#impl::device::get_attribute(pi, attrib, dev).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetNvSciSyncAttributes( - nvSciSyncAttrList: *mut ::std::os::raw::c_void, - dev: CUdevice, - flags: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuDeviceGetProperties( - prop: *mut CUdevprop, - dev: CUdevice, -) -> CUresult { - r#impl::device::get_properties(prop, dev).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuDeviceComputeCapability( - major: *mut ::std::os::raw::c_int, - minor: *mut ::std::os::raw::c_int, - dev: CUdevice, -) -> CUresult { - hipDeviceComputeCapability(major, minor, dev.0).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxRetain( - pctx: *mut CUcontext, - CUdevice(dev): CUdevice, -) -> CUresult { - unsafe { hipDevicePrimaryCtxRetain(pctx as _, dev).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxRelease(dev: CUdevice) -> CUresult { - cuDevicePrimaryCtxRelease_v2(dev) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxRelease_v2(CUdevice(dev): CUdevice) -> CUresult { - unsafe { hipDevicePrimaryCtxRelease(dev).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxSetFlags( - dev: CUdevice, - flags: ::std::os::raw::c_uint, -) -> CUresult { - CUresult::CUDA_SUCCESS -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxSetFlags_v2( - dev: CUdevice, - flags: ::std::os::raw::c_uint, -) -> CUresult { - cuDevicePrimaryCtxSetFlags(dev, flags) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxGetState( - CUdevice(dev): CUdevice, - flags: *mut ::std::os::raw::c_uint, - active: *mut ::std::os::raw::c_int, -) -> CUresult { - unsafe { hipDevicePrimaryCtxGetState(dev, flags, active).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxReset(dev: CUdevice) -> CUresult { - cuDevicePrimaryCtxReset_v2(dev) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDevicePrimaryCtxReset_v2(dev: CUdevice) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxCreate_v2( - pctx: *mut CUcontext, - flags: ::std::os::raw::c_uint, - CUdevice(dev): CUdevice, -) -> CUresult { - unsafe { hipCtxCreate(pctx as _, flags, dev).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxDestroy_v2(ctx: CUcontext) -> CUresult { - unsafe { hipCtxDestroy(ctx as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxPushCurrent_v2(ctx: CUcontext) -> CUresult { - unsafe { hipCtxPushCurrent(ctx as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxPopCurrent_v2(pctx: *mut CUcontext) -> CUresult { - unsafe { hipCtxPopCurrent(pctx as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxSetCurrent(ctx: CUcontext) -> CUresult { - unsafe { hipCtxSetCurrent(ctx as _).into() } -} - 
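The device and context shims above pass CUDA's opaque handles straight through to HIP by pointer cast. A hypothetical caller-side sketch, not part of the patch, showing the initialization sequence an application would drive through these exports; `init_primary_context` is an illustrative name only:

fn init_primary_context() -> Result<CUcontext, CUresult> {
    let check = |res: CUresult| if res == CUresult::CUDA_SUCCESS { Ok(()) } else { Err(res) };
    check(cuInit(0))?;
    // CUdevice is an integer newtype; zero-initialize it before cuDeviceGet fills it in.
    let mut dev: CUdevice = unsafe { std::mem::zeroed() };
    check(cuDeviceGet(&mut dev, 0))?;
    let mut ctx: CUcontext = std::ptr::null_mut();
    check(cuDevicePrimaryCtxRetain(&mut ctx, dev))?;
    check(cuCtxSetCurrent(ctx))?;
    Ok(ctx)
}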
-#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetCurrent(pctx: *mut CUcontext) -> CUresult { - unsafe { hipCtxGetCurrent(pctx as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetDevice(device: *mut CUdevice) -> CUresult { - unsafe { hipCtxGetDevice(device as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetFlags(flags: *mut ::std::os::raw::c_uint) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxSynchronize() -> CUresult { - // hipCtxSynchronize is not implemented - unsafe { hipDeviceSynchronize().into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxSetLimit(limit: CUlimit, value: usize) -> CUresult { - r#impl::context::set_limit(limit, value) -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuCtxGetLimit(pvalue: *mut usize, limit: CUlimit) -> CUresult { - r#impl::context::get_limit(pvalue, limit) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetCacheConfig(pconfig: *mut CUfunc_cache) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxSetCacheConfig(config: CUfunc_cache) -> CUresult { - CUresult::CUDA_SUCCESS -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetSharedMemConfig(pConfig: *mut CUsharedconfig) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxSetSharedMemConfig(config: CUsharedconfig) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetApiVersion( - ctx: CUcontext, - version: *mut ::std::os::raw::c_uint, -) -> CUresult { - unsafe { hipCtxGetApiVersion(ctx as _, version as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxGetStreamPriorityRange( - leastPriority: *mut ::std::os::raw::c_int, - greatestPriority: *mut ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxResetPersistingL2Cache() -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxAttach( - pctx: *mut CUcontext, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxDetach(ctx: CUcontext) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleLoad( - module: *mut CUmodule, - fname: *const ::std::os::raw::c_char, -) -> CUresult { - r#impl::module::load(module, fname).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleLoadData( - module: *mut CUmodule, - image: *const ::std::os::raw::c_void, -) -> CUresult { - r#impl::module::load_data(module, image).encuda() -} - -// TODO: parse jit options -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleLoadDataEx( - module: *mut CUmodule, - image: *const ::std::os::raw::c_void, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::module::load_data(module, image).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleLoadFatBinary( - module: *mut CUmodule, - fatCubin: *const ::std::os::raw::c_void, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn 
cuModuleUnload(hmod: CUmodule) -> CUresult { - unsafe { hipModuleUnload(hmod as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleGetFunction( - hfunc: *mut CUfunction, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, -) -> CUresult { - unsafe { hipModuleGetFunction(hfunc as _, hmod as _, name).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuModuleGetGlobal_v2( - dptr: *mut CUdeviceptr, - bytes: *mut usize, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, -) -> CUresult { - hipModuleGetGlobal(dptr as _, bytes, hmod as _, name).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleGetTexRef( - pTexRef: *mut CUtexref, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuModuleGetSurfRef( - pSurfRef: *mut CUsurfref, - hmod: CUmodule, - name: *const ::std::os::raw::c_char, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuLinkCreate_v2( - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, - stateOut: *mut CUlinkState, -) -> CUresult { - r#impl::link::create(numOptions, options, optionValues, stateOut) -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuLinkAddData_v2( - state: CUlinkState, - type_: CUjitInputType, - data: *mut ::std::os::raw::c_void, - size: usize, - name: *const ::std::os::raw::c_char, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::link::add_data( - state, - type_, - data, - size, - name, - numOptions, - options, - optionValues, - ) - .encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLinkAddFile_v2( - state: CUlinkState, - type_: CUjitInputType, - path: *const ::std::os::raw::c_char, - numOptions: ::std::os::raw::c_uint, - options: *mut CUjit_option, - optionValues: *mut *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuLinkComplete( - state: CUlinkState, - cubinOut: *mut *mut ::std::os::raw::c_void, - sizeOut: *mut usize, -) -> CUresult { - r#impl::link::complete(state, cubinOut, sizeOut).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuLinkDestroy(state: CUlinkState) -> CUresult { - r#impl::link::destroy(state) -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuMemGetInfo_v2(free: *mut usize, total: *mut usize) -> CUresult { - hipMemGetInfo(free, total).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemAlloc_v2(dptr: *mut CUdeviceptr, bytesize: usize) -> CUresult { - let mut dev_ptr = std::ptr::null_mut(); - let err = unsafe { hipMalloc(&mut dev_ptr, bytesize) }.into(); - if err != CUresult::CUDA_SUCCESS { - return err; - } - // HACK ALERT: GeekBench is buggy and sometimes assumes that buffers are zeroed-out on creation - let err = unsafe { hipMemsetD8(dev_ptr, 0, bytesize) }.into(); - /* - let bytesize_rounded_down = bytesize & !3usize; - let bytes = usize::min(bytesize_rounded_down, 4096); - let err = unsafe { hipMemsetD32(dev_ptr, 0, bytes / 1024).into() }; - */ - if err != CUresult::CUDA_SUCCESS { - return err; - } - unsafe { *dptr = CUdeviceptr(dev_ptr as usize) }; - CUresult::CUDA_SUCCESS -} - 
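The "HACK ALERT" comment in cuMemAlloc_v2 above explains why every allocation is immediately cleared with hipMemsetD8. A hypothetical caller-side sketch of the guarantee that workaround provides (fresh device buffers read back as zeros), assuming a context is already current as in the earlier sketch; `alloc_reads_back_zeroed` is an illustrative name only:

fn alloc_reads_back_zeroed() {
    let mut dptr: CUdeviceptr = unsafe { std::mem::zeroed() };
    assert_eq!(cuMemAlloc_v2(&mut dptr, 4096), CUresult::CUDA_SUCCESS);
    let mut host = vec![0xffu8; 4096];
    assert_eq!(
        cuMemcpyDtoH_v2(host.as_mut_ptr() as _, dptr, 4096),
        CUresult::CUDA_SUCCESS
    );
    // Every byte is zero because cuMemAlloc_v2 memsets the buffer right after hipMalloc.
    assert!(host.iter().all(|&byte| byte == 0));
    assert_eq!(cuMemFree_v2(dptr), CUresult::CUDA_SUCCESS);
}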
-#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemAllocPitch_v2( - dptr: *mut CUdeviceptr, - pPitch: *mut usize, - WidthInBytes: usize, - Height: usize, - ElementSizeBytes: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemFree_v2(dptr: CUdeviceptr) -> CUresult { - unsafe { hipFree(dptr.0 as _).into() } -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemGetAddressRange_v2( - pbase: *mut CUdeviceptr, - psize: *mut usize, - dptr: CUdeviceptr, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemAllocHost_v2( - pp: *mut *mut ::std::os::raw::c_void, - bytesize: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuMemFreeHost(p: *mut ::std::os::raw::c_void) -> CUresult { - hipFreeHost(p).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuMemHostAlloc( - pp: *mut *mut ::std::os::raw::c_void, - bytesize: usize, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - hipMemAllocHost(pp, bytesize).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemHostGetDevicePointer_v2( - pdptr: *mut CUdeviceptr, - p: *mut ::std::os::raw::c_void, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemHostGetFlags( - pFlags: *mut ::std::os::raw::c_uint, - p: *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemAllocManaged( - dptr: *mut CUdeviceptr, - bytesize: usize, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetByPCIBusId( - dev: *mut CUdevice, - pciBusId: *const ::std::os::raw::c_char, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetPCIBusId( - pciBusId: *mut ::std::os::raw::c_char, - len: ::std::os::raw::c_int, - dev: CUdevice, -) -> CUresult { - unsafe { hipDeviceGetPCIBusId(pciBusId, len, dev.0) }.into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuIpcGetEventHandle( - pHandle: *mut CUipcEventHandle, - event: CUevent, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuIpcOpenEventHandle( - phEvent: *mut CUevent, - handle: CUipcEventHandle, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuIpcGetMemHandle( - pHandle: *mut CUipcMemHandle, - dptr: CUdeviceptr, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuIpcOpenMemHandle( - pdptr: *mut CUdeviceptr, - handle: CUipcMemHandle, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuIpcOpenMemHandle_v2( - pdptr: *mut CUdeviceptr, - handle: CUipcMemHandle, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuIpcCloseMemHandle(dptr: CUdeviceptr) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemHostRegister_v2( - p: *mut ::std::os::raw::c_void, - bytesize: usize, - Flags: ::std::os::raw::c_uint, -) -> 
CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemHostUnregister(p: *mut ::std::os::raw::c_void) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy(dst: CUdeviceptr, src: CUdeviceptr, ByteCount: usize) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyPeer( - dstDevice: CUdeviceptr, - dstContext: CUcontext, - srcDevice: CUdeviceptr, - srcContext: CUcontext, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyHtoD_v2( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, -) -> CUresult { - unsafe { hipMemcpyHtoD(dstDevice.0 as _, srcHost as _, ByteCount).into() } -} - -// TODO: implement default stream semantics -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyHtoD_v2_ptds( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, -) -> CUresult { - cuMemcpyHtoD_v2(dstDevice, srcHost, ByteCount) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyDtoH_v2( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, -) -> CUresult { - unsafe { hipMemcpyDtoH(dstHost as _, srcDevice.0 as _, ByteCount).into() } -} - -// TODO: implement default stream semantics -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyDtoH_v2_ptds( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, -) -> CUresult { - cuMemcpyDtoH_v2(dstHost, srcDevice, ByteCount) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyDtoD_v2( - dstDevice: CUdeviceptr, - srcDevice: CUdeviceptr, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyDtoA_v2( - dstArray: CUarray, - dstOffset: usize, - srcDevice: CUdeviceptr, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyAtoD_v2( - dstDevice: CUdeviceptr, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyHtoA_v2( - dstArray: CUarray, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyAtoH_v2( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyAtoA_v2( - dstArray: CUarray, - dstOffset: usize, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy2D_v2(pCopy: *const CUDA_MEMCPY2D) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy2DUnaligned_v2(pCopy: *const CUDA_MEMCPY2D) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuMemcpy3D_v2(pCopy: *const CUDA_MEMCPY3D) -> CUresult { - r#impl::memory::copy_3d(pCopy).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy3DPeer(pCopy: 
*const CUDA_MEMCPY3D_PEER) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyAsync( - dst: CUdeviceptr, - src: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyPeerAsync( - dstDevice: CUdeviceptr, - dstContext: CUcontext, - srcDevice: CUdeviceptr, - srcContext: CUcontext, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuMemcpyHtoDAsync_v2( - dstDevice: CUdeviceptr, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - hipMemcpyHtoDAsync(dstDevice.0 as _, srcHost as _, ByteCount, hStream as _).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyDtoHAsync_v2( - dstHost: *mut ::std::os::raw::c_void, - srcDevice: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyDtoDAsync_v2( - dstDevice: CUdeviceptr, - srcDevice: CUdeviceptr, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyHtoAAsync_v2( - dstArray: CUarray, - dstOffset: usize, - srcHost: *const ::std::os::raw::c_void, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpyAtoHAsync_v2( - dstHost: *mut ::std::os::raw::c_void, - srcArray: CUarray, - srcOffset: usize, - ByteCount: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy2DAsync_v2( - pCopy: *const CUDA_MEMCPY2D, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy3DAsync_v2( - pCopy: *const CUDA_MEMCPY3D, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemcpy3DPeerAsync( - pCopy: *const CUDA_MEMCPY3D_PEER, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD8_v2( - dstDevice: CUdeviceptr, - uc: ::std::os::raw::c_uchar, - N: usize, -) -> CUresult { - unsafe { hipMemsetD8(dstDevice.0 as _, uc, N).into() } -} - -// TODO: implement default stream semantics -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD8_v2_ptds( - dstDevice: CUdeviceptr, - uc: ::std::os::raw::c_uchar, - N: usize, -) -> CUresult { - cuMemsetD8_v2(dstDevice, uc, N) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD16_v2( - dstDevice: CUdeviceptr, - us: ::std::os::raw::c_ushort, - N: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD32_v2( - dstDevice: CUdeviceptr, - ui: ::std::os::raw::c_uint, - N: usize, -) -> CUresult { - unsafe { hipMemsetD32(dstDevice.0 as _, ui as _, N).into() } -} - -// TODO: implement default stream semantics -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD32_v2_ptds( - dstDevice: CUdeviceptr, - ui: ::std::os::raw::c_uint, - N: usize, -) -> CUresult { - cuMemsetD32_v2(dstDevice, ui, N) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn 
cuMemsetD2D8_v2( - dstDevice: CUdeviceptr, - dstPitch: usize, - uc: ::std::os::raw::c_uchar, - Width: usize, - Height: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD2D16_v2( - dstDevice: CUdeviceptr, - dstPitch: usize, - us: ::std::os::raw::c_ushort, - Width: usize, - Height: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD2D32_v2( - dstDevice: CUdeviceptr, - dstPitch: usize, - ui: ::std::os::raw::c_uint, - Width: usize, - Height: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD8Async( - dstDevice: CUdeviceptr, - uc: ::std::os::raw::c_uchar, - N: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD16Async( - dstDevice: CUdeviceptr, - us: ::std::os::raw::c_ushort, - N: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD32Async( - dstDevice: CUdeviceptr, - ui: ::std::os::raw::c_uint, - N: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD2D8Async( - dstDevice: CUdeviceptr, - dstPitch: usize, - uc: ::std::os::raw::c_uchar, - Width: usize, - Height: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD2D16Async( - dstDevice: CUdeviceptr, - dstPitch: usize, - us: ::std::os::raw::c_ushort, - Width: usize, - Height: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemsetD2D32Async( - dstDevice: CUdeviceptr, - dstPitch: usize, - ui: ::std::os::raw::c_uint, - Width: usize, - Height: usize, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuArrayCreate_v2( - pHandle: *mut CUarray, - pAllocateArray: *const CUDA_ARRAY_DESCRIPTOR, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuArrayGetDescriptor_v2( - pArrayDescriptor: *mut CUDA_ARRAY_DESCRIPTOR, - hArray: CUarray, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuArrayDestroy(hArray: CUarray) -> CUresult { - hipArrayDestroy(hArray as _).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuArray3DCreate_v2( - pHandle: *mut CUarray, - pAllocateArray: *const CUDA_ARRAY3D_DESCRIPTOR, -) -> CUresult { - hipArray3DCreate(pHandle as _, pAllocateArray as _).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuArray3DGetDescriptor_v2( - pArrayDescriptor: *mut CUDA_ARRAY3D_DESCRIPTOR, - hArray: CUarray, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMipmappedArrayCreate( - pHandle: *mut CUmipmappedArray, - pMipmappedArrayDesc: *const CUDA_ARRAY3D_DESCRIPTOR, - numMipmapLevels: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMipmappedArrayGetLevel( - pLevelArray: *mut CUarray, - hMipmappedArray: CUmipmappedArray, - level: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - 
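The array entry points above forward the CUDA descriptor structs to HIP by raw pointer cast, which relies on the CUDA and HIP descriptor layouts matching. A hypothetical caller-side sketch of cuArray3DCreate_v2 using the CUDA_ARRAY3D_DESCRIPTOR fields defined earlier in this hunk; `create_byte_array_3d` is an illustrative name, and `CUarray_format::CU_AD_FORMAT_UNSIGNED_INT8` is assumed to exist in the same newtype-enum style as the other enums in this file:

fn create_byte_array_3d() -> Result<CUarray, CUresult> {
    let desc = CUDA_ARRAY3D_DESCRIPTOR {
        Width: 64,
        Height: 64,
        Depth: 4,
        Format: CUarray_format::CU_AD_FORMAT_UNSIGNED_INT8, // assumed constant name
        NumChannels: 1,
        Flags: 0,
    };
    let mut array: CUarray = std::ptr::null_mut();
    // cuArray3DCreate_v2 hands both pointers to hipArray3DCreate unchanged.
    let res = unsafe { cuArray3DCreate_v2(&mut array, &desc) };
    if res == CUresult::CUDA_SUCCESS { Ok(array) } else { Err(res) }
}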
-#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMipmappedArrayDestroy(hMipmappedArray: CUmipmappedArray) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemAddressReserve( - ptr: *mut CUdeviceptr, - size: usize, - alignment: usize, - addr: CUdeviceptr, - flags: ::std::os::raw::c_ulonglong, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemAddressFree(ptr: CUdeviceptr, size: usize) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemCreate( - handle: *mut CUmemGenericAllocationHandle, - size: usize, - prop: *const CUmemAllocationProp, - flags: ::std::os::raw::c_ulonglong, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemRelease(handle: CUmemGenericAllocationHandle) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemMap( - ptr: CUdeviceptr, - size: usize, - offset: usize, - handle: CUmemGenericAllocationHandle, - flags: ::std::os::raw::c_ulonglong, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemUnmap(ptr: CUdeviceptr, size: usize) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemSetAccess( - ptr: CUdeviceptr, - size: usize, - desc: *const CUmemAccessDesc, - count: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemGetAccess( - flags: *mut ::std::os::raw::c_ulonglong, - location: *const CUmemLocation, - ptr: CUdeviceptr, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemExportToShareableHandle( - shareableHandle: *mut ::std::os::raw::c_void, - handle: CUmemGenericAllocationHandle, - handleType: CUmemAllocationHandleType, - flags: ::std::os::raw::c_ulonglong, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemImportFromShareableHandle( - handle: *mut CUmemGenericAllocationHandle, - osHandle: *mut ::std::os::raw::c_void, - shHandleType: CUmemAllocationHandleType, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemGetAllocationGranularity( - granularity: *mut usize, - prop: *const CUmemAllocationProp, - option: CUmemAllocationGranularity_flags, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemGetAllocationPropertiesFromHandle( - prop: *mut CUmemAllocationProp, - handle: CUmemGenericAllocationHandle, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemRetainAllocationHandle( - handle: *mut CUmemGenericAllocationHandle, - addr: *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub unsafe extern "system" fn cuPointerGetAttribute( - data: *mut ::std::os::raw::c_void, - attribute: CUpointer_attribute, - ptr: CUdeviceptr, -) -> CUresult { - r#impl::pointer::get_attribute(data, attribute, ptr).encuda() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemPrefetchAsync( - devPtr: CUdeviceptr, - count: usize, - dstDevice: CUdevice, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), 
no_mangle)] -pub extern "system" fn cuMemAdvise( - devPtr: CUdeviceptr, - count: usize, - advice: CUmem_advise, - device: CUdevice, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemRangeGetAttribute( - data: *mut ::std::os::raw::c_void, - dataSize: usize, - attribute: CUmem_range_attribute, - devPtr: CUdeviceptr, - count: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuMemRangeGetAttributes( - data: *mut *mut ::std::os::raw::c_void, - dataSizes: *mut usize, - attributes: *mut CUmem_range_attribute, - numAttributes: usize, - devPtr: CUdeviceptr, - count: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuPointerSetAttribute( - value: *const ::std::os::raw::c_void, - attribute: CUpointer_attribute, - ptr: CUdeviceptr, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuPointerGetAttributes( - numAttributes: ::std::os::raw::c_uint, - attributes: *mut CUpointer_attribute, - data: *mut *mut ::std::os::raw::c_void, - ptr: CUdeviceptr, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamCreate( - phStream: *mut CUstream, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - unsafe { hipStreamCreateWithFlags(phStream as _, Flags) }.into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamCreateWithPriority( - phStream: *mut CUstream, - flags: ::std::os::raw::c_uint, - priority: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamGetPriority( - hStream: CUstream, - priority: *mut ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamGetFlags( - hStream: CUstream, - flags: *mut ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamGetCtx(hStream: CUstream, pctx: *mut CUcontext) -> CUresult { - unsafe { hipStreamGetCtx(hStream as _, pctx as _) }.into() -} - -// TODO: implement default stream semantics -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamGetCtx_ptsz(hStream: CUstream, pctx: *mut CUcontext) -> CUresult { - cuStreamGetCtx(hStream, pctx) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamWaitEvent( - hStream: CUstream, - hEvent: CUevent, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamAddCallback( - hStream: CUstream, - callback: CUstreamCallback, - userData: *mut ::std::os::raw::c_void, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamBeginCapture_v2( - hStream: CUstream, - mode: CUstreamCaptureMode, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuThreadExchangeStreamCaptureMode( - mode: *mut CUstreamCaptureMode, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamEndCapture(hStream: CUstream, phGraph: *mut CUgraph) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamIsCapturing( - hStream: 
CUstream, - captureStatus: *mut CUstreamCaptureStatus, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamGetCaptureInfo( - hStream: CUstream, - captureStatus: *mut CUstreamCaptureStatus, - id: *mut cuuint64_t, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamAttachMemAsync( - hStream: CUstream, - dptr: CUdeviceptr, - length: usize, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamQuery(hStream: CUstream) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamSynchronize(hStream: CUstream) -> CUresult { - unsafe { hipStreamSynchronize(hStream as _) }.into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamDestroy_v2(hStream: CUstream) -> CUresult { - unsafe { hipStreamDestroy(hStream as _) }.into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamCopyAttributes(dst: CUstream, src: CUstream) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamGetAttribute( - hStream: CUstream, - attr: CUstreamAttrID, - value_out: *mut CUstreamAttrValue, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamSetAttribute( - hStream: CUstream, - attr: CUstreamAttrID, - value: *const CUstreamAttrValue, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuEventCreate( - phEvent: *mut CUevent, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuEventRecord(hEvent: CUevent, hStream: CUstream) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuEventQuery(hEvent: CUevent) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuEventSynchronize(hEvent: CUevent) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuEventDestroy_v2(hEvent: CUevent) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuEventElapsedTime( - pMilliseconds: *mut f32, - hStart: CUevent, - hEnd: CUevent, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuImportExternalMemory( - extMem_out: *mut CUexternalMemory, - memHandleDesc: *const CUDA_EXTERNAL_MEMORY_HANDLE_DESC, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuExternalMemoryGetMappedBuffer( - devPtr: *mut CUdeviceptr, - extMem: CUexternalMemory, - bufferDesc: *const CUDA_EXTERNAL_MEMORY_BUFFER_DESC, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuExternalMemoryGetMappedMipmappedArray( - mipmap: *mut CUmipmappedArray, - extMem: CUexternalMemory, - mipmapDesc: *const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDestroyExternalMemory(extMem: CUexternalMemory) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuImportExternalSemaphore( - extSem_out: *mut 
CUexternalSemaphore, - semHandleDesc: *const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuSignalExternalSemaphoresAsync( - extSemArray: *const CUexternalSemaphore, - paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, - numExtSems: ::std::os::raw::c_uint, - stream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuWaitExternalSemaphoresAsync( - extSemArray: *const CUexternalSemaphore, - paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, - numExtSems: ::std::os::raw::c_uint, - stream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDestroyExternalSemaphore(extSem: CUexternalSemaphore) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamWaitValue32( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint32_t, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamWaitValue64( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint64_t, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamWriteValue32( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint32_t, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamWriteValue64( - stream: CUstream, - addr: CUdeviceptr, - value: cuuint64_t, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuStreamBatchMemOp( - stream: CUstream, - count: ::std::os::raw::c_uint, - paramArray: *mut CUstreamBatchMemOpParams, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncGetAttribute( - pi: *mut ::std::os::raw::c_int, - attrib: CUfunction_attribute, - hfunc: CUfunction, -) -> CUresult { - r#impl::function::get_attribute(pi, attrib, hfunc).into() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncSetAttribute( - hfunc: CUfunction, - attrib: CUfunction_attribute, - value: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncSetCacheConfig(hfunc: CUfunction, config: CUfunc_cache) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncSetSharedMemConfig( - hfunc: CUfunction, - config: CUsharedconfig, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchKernel( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, - extra: *mut *mut ::std::os::raw::c_void, -) -> CUresult { - unsafe { - hipModuleLaunchKernel( - f as _, - gridDimX, - gridDimY, - gridDimZ, - blockDimX, - blockDimY, - blockDimZ, - sharedMemBytes, - hStream as _, - kernelParams, - extra, - ) - } - .into() -} - -// TODO: implement 
default stream semantics -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchKernel_ptsz( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, - extra: *mut *mut ::std::os::raw::c_void, -) -> CUresult { - cuLaunchKernel( - f, - gridDimX, - gridDimY, - gridDimZ, - blockDimX, - blockDimY, - blockDimZ, - sharedMemBytes, - hStream, - kernelParams, - extra, - ) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchCooperativeKernel( - f: CUfunction, - gridDimX: ::std::os::raw::c_uint, - gridDimY: ::std::os::raw::c_uint, - gridDimZ: ::std::os::raw::c_uint, - blockDimX: ::std::os::raw::c_uint, - blockDimY: ::std::os::raw::c_uint, - blockDimZ: ::std::os::raw::c_uint, - sharedMemBytes: ::std::os::raw::c_uint, - hStream: CUstream, - kernelParams: *mut *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchCooperativeKernelMultiDevice( - launchParamsList: *mut CUDA_LAUNCH_PARAMS, - numDevices: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchHostFunc( - hStream: CUstream, - fn_: CUhostFn, - userData: *mut ::std::os::raw::c_void, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncSetBlockShape( - hfunc: CUfunction, - x: ::std::os::raw::c_int, - y: ::std::os::raw::c_int, - z: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncSetSharedSize( - hfunc: CUfunction, - bytes: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuParamSetSize( - hfunc: CUfunction, - numbytes: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuParamSeti( - hfunc: CUfunction, - offset: ::std::os::raw::c_int, - value: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuParamSetf( - hfunc: CUfunction, - offset: ::std::os::raw::c_int, - value: f32, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuParamSetv( - hfunc: CUfunction, - offset: ::std::os::raw::c_int, - ptr: *mut ::std::os::raw::c_void, - numbytes: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunch(f: CUfunction) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchGrid( - f: CUfunction, - grid_width: ::std::os::raw::c_int, - grid_height: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuLaunchGridAsync( - f: CUfunction, - grid_width: ::std::os::raw::c_int, - grid_height: ::std::os::raw::c_int, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuParamSetTexRef( - hfunc: CUfunction, - texunit: 
::std::os::raw::c_int, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphCreate( - phGraph: *mut CUgraph, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphAddKernelNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *const CUDA_KERNEL_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphKernelNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_KERNEL_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphKernelNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_KERNEL_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphAddMemcpyNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - copyParams: *const CUDA_MEMCPY3D, - ctx: CUcontext, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphMemcpyNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_MEMCPY3D, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphMemcpyNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_MEMCPY3D, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphAddMemsetNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - memsetParams: *const CUDA_MEMSET_NODE_PARAMS, - ctx: CUcontext, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphMemsetNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_MEMSET_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphMemsetNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_MEMSET_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphAddHostNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - nodeParams: *const CUDA_HOST_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphHostNodeGetParams( - hNode: CUgraphNode, - nodeParams: *mut CUDA_HOST_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphHostNodeSetParams( - hNode: CUgraphNode, - nodeParams: *const CUDA_HOST_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphAddChildGraphNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, - childGraph: CUgraph, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphChildGraphNodeGetGraph( - hNode: CUgraphNode, - phGraph: *mut CUgraph, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn 
cuGraphAddEmptyNode( - phGraphNode: *mut CUgraphNode, - hGraph: CUgraph, - dependencies: *const CUgraphNode, - numDependencies: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphClone( - phGraphClone: *mut CUgraph, - originalGraph: CUgraph, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphNodeFindInClone( - phNode: *mut CUgraphNode, - hOriginalNode: CUgraphNode, - hClonedGraph: CUgraph, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphNodeGetType( - hNode: CUgraphNode, - type_: *mut CUgraphNodeType, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphGetNodes( - hGraph: CUgraph, - nodes: *mut CUgraphNode, - numNodes: *mut usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphGetRootNodes( - hGraph: CUgraph, - rootNodes: *mut CUgraphNode, - numRootNodes: *mut usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphGetEdges( - hGraph: CUgraph, - from: *mut CUgraphNode, - to: *mut CUgraphNode, - numEdges: *mut usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphNodeGetDependencies( - hNode: CUgraphNode, - dependencies: *mut CUgraphNode, - numDependencies: *mut usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphNodeGetDependentNodes( - hNode: CUgraphNode, - dependentNodes: *mut CUgraphNode, - numDependentNodes: *mut usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphAddDependencies( - hGraph: CUgraph, - from: *const CUgraphNode, - to: *const CUgraphNode, - numDependencies: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphRemoveDependencies( - hGraph: CUgraph, - from: *const CUgraphNode, - to: *const CUgraphNode, - numDependencies: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphDestroyNode(hNode: CUgraphNode) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphInstantiate_v2( - phGraphExec: *mut CUgraphExec, - hGraph: CUgraph, - phErrorNode: *mut CUgraphNode, - logBuffer: *mut ::std::os::raw::c_char, - bufferSize: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphExecKernelNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - nodeParams: *const CUDA_KERNEL_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphExecMemcpyNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - copyParams: *const CUDA_MEMCPY3D, - ctx: CUcontext, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphExecMemsetNodeSetParams( - hGraphExec: CUgraphExec, - hNode: CUgraphNode, - memsetParams: *const CUDA_MEMSET_NODE_PARAMS, - ctx: CUcontext, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphExecHostNodeSetParams( - hGraphExec: CUgraphExec, - 
hNode: CUgraphNode, - nodeParams: *const CUDA_HOST_NODE_PARAMS, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphLaunch(hGraphExec: CUgraphExec, hStream: CUstream) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphExecDestroy(hGraphExec: CUgraphExec) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphDestroy(hGraph: CUgraph) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphExecUpdate( - hGraphExec: CUgraphExec, - hGraph: CUgraph, - hErrorNode_out: *mut CUgraphNode, - updateResult_out: *mut CUgraphExecUpdateResult, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphKernelNodeCopyAttributes( - dst: CUgraphNode, - src: CUgraphNode, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphKernelNodeGetAttribute( - hNode: CUgraphNode, - attr: CUkernelNodeAttrID, - value_out: *mut CUkernelNodeAttrValue, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphKernelNodeSetAttribute( - hNode: CUgraphNode, - attr: CUkernelNodeAttrID, - value: *const CUkernelNodeAttrValue, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuOccupancyMaxActiveBlocksPerMultiprocessor( - numBlocks: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSize: ::std::os::raw::c_int, - dynamicSMemSize: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( - numBlocks: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSize: ::std::os::raw::c_int, - dynamicSMemSize: usize, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuOccupancyMaxPotentialBlockSize( - minGridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSizeToDynamicSMemSize: CUoccupancyB2DSize, - dynamicSMemSize: usize, - blockSizeLimit: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuOccupancyMaxPotentialBlockSizeWithFlags( - minGridSize: *mut ::std::os::raw::c_int, - blockSize: *mut ::std::os::raw::c_int, - func: CUfunction, - blockSizeToDynamicSMemSize: CUoccupancyB2DSize, - dynamicSMemSize: usize, - blockSizeLimit: ::std::os::raw::c_int, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuOccupancyAvailableDynamicSMemPerBlock( - dynamicSmemSize: *mut usize, - func: CUfunction, - numBlocks: ::std::os::raw::c_int, - blockSize: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetArray( - hTexRef: CUtexref, - hArray: CUarray, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetMipmappedArray( - hTexRef: CUtexref, - hMipmappedArray: CUmipmappedArray, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern 
"system" fn cuTexRefSetAddress_v2( - ByteOffset: *mut usize, - hTexRef: CUtexref, - dptr: CUdeviceptr, - bytes: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetAddress2D_v3( - hTexRef: CUtexref, - desc: *const CUDA_ARRAY_DESCRIPTOR, - dptr: CUdeviceptr, - Pitch: usize, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetFormat( - hTexRef: CUtexref, - fmt: CUarray_format, - NumPackedComponents: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetAddressMode( - hTexRef: CUtexref, - dim: ::std::os::raw::c_int, - am: CUaddress_mode, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetFilterMode(hTexRef: CUtexref, fm: CUfilter_mode) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetMipmapFilterMode( - hTexRef: CUtexref, - fm: CUfilter_mode, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetMipmapLevelBias(hTexRef: CUtexref, bias: f32) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetMipmapLevelClamp( - hTexRef: CUtexref, - minMipmapLevelClamp: f32, - maxMipmapLevelClamp: f32, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetMaxAnisotropy( - hTexRef: CUtexref, - maxAniso: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetBorderColor( - hTexRef: CUtexref, - pBorderColor: *mut f32, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefSetFlags( - hTexRef: CUtexref, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetAddress_v2( - pdptr: *mut CUdeviceptr, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetArray(phArray: *mut CUarray, hTexRef: CUtexref) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetMipmappedArray( - phMipmappedArray: *mut CUmipmappedArray, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetAddressMode( - pam: *mut CUaddress_mode, - hTexRef: CUtexref, - dim: ::std::os::raw::c_int, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetFilterMode( - pfm: *mut CUfilter_mode, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetFormat( - pFormat: *mut CUarray_format, - pNumChannels: *mut ::std::os::raw::c_int, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetMipmapFilterMode( - pfm: *mut CUfilter_mode, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetMipmapLevelBias(pbias: *mut f32, hTexRef: CUtexref) -> CUresult { - 
r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetMipmapLevelClamp( - pminMipmapLevelClamp: *mut f32, - pmaxMipmapLevelClamp: *mut f32, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetMaxAnisotropy( - pmaxAniso: *mut ::std::os::raw::c_int, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetBorderColor( - pBorderColor: *mut f32, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefGetFlags( - pFlags: *mut ::std::os::raw::c_uint, - hTexRef: CUtexref, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefCreate(pTexRef: *mut CUtexref) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexRefDestroy(hTexRef: CUtexref) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuSurfRefSetArray( - hSurfRef: CUsurfref, - hArray: CUarray, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuSurfRefGetArray(phArray: *mut CUarray, hSurfRef: CUsurfref) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexObjectCreate( - pTexObject: *mut CUtexObject, - pResDesc: *const CUDA_RESOURCE_DESC, - pTexDesc: *const CUDA_TEXTURE_DESC, - pResViewDesc: *const CUDA_RESOURCE_VIEW_DESC, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexObjectDestroy(texObject: CUtexObject) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexObjectGetResourceDesc( - pResDesc: *mut CUDA_RESOURCE_DESC, - texObject: CUtexObject, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexObjectGetTextureDesc( - pTexDesc: *mut CUDA_TEXTURE_DESC, - texObject: CUtexObject, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuTexObjectGetResourceViewDesc( - pResViewDesc: *mut CUDA_RESOURCE_VIEW_DESC, - texObject: CUtexObject, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuSurfObjectCreate( - pSurfObject: *mut CUsurfObject, - pResDesc: *const CUDA_RESOURCE_DESC, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuSurfObjectDestroy(surfObject: CUsurfObject) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuSurfObjectGetResourceDesc( - pResDesc: *mut CUDA_RESOURCE_DESC, - surfObject: CUsurfObject, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceCanAccessPeer( - canAccessPeer: *mut ::std::os::raw::c_int, - dev: CUdevice, - peerDev: CUdevice, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxEnablePeerAccess( - peerContext: CUcontext, - Flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuCtxDisablePeerAccess(peerContext: CUcontext) 
-> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuDeviceGetP2PAttribute( - value: *mut ::std::os::raw::c_int, - attrib: CUdevice_P2PAttribute, - srcDevice: CUdevice, - dstDevice: CUdevice, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsUnregisterResource(resource: CUgraphicsResource) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsSubResourceGetMappedArray( - pArray: *mut CUarray, - resource: CUgraphicsResource, - arrayIndex: ::std::os::raw::c_uint, - mipLevel: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsResourceGetMappedMipmappedArray( - pMipmappedArray: *mut CUmipmappedArray, - resource: CUgraphicsResource, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsResourceGetMappedPointer_v2( - pDevPtr: *mut CUdeviceptr, - pSize: *mut usize, - resource: CUgraphicsResource, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsResourceSetMapFlags_v2( - resource: CUgraphicsResource, - flags: ::std::os::raw::c_uint, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsMapResources( - count: ::std::os::raw::c_uint, - resources: *mut CUgraphicsResource, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGraphicsUnmapResources( - count: ::std::os::raw::c_uint, - resources: *mut CUgraphicsResource, - hStream: CUstream, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuGetExportTable( - ppExportTable: *mut *const ::std::os::raw::c_void, - pExportTableId: *const CUuuid, -) -> CUresult { - r#impl::export_table::get(ppExportTable, pExportTableId) -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuFuncGetModule(hmod: *mut CUmodule, hfunc: CUfunction) -> CUresult { - r#impl::unimplemented() -} - -impl CUoutput_mode_enum { - pub const CU_OUT_KEY_VALUE_PAIR: CUoutput_mode_enum = CUoutput_mode_enum(0); -} -impl CUoutput_mode_enum { - pub const CU_OUT_CSV: CUoutput_mode_enum = CUoutput_mode_enum(1); -} -#[repr(transparent)] -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub struct CUoutput_mode_enum(pub ::std::os::raw::c_uint); -pub use self::CUoutput_mode_enum as CUoutput_mode; - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuProfilerInitialize( - configFile: *const ::std::os::raw::c_char, - outputFile: *const ::std::os::raw::c_char, - outputMode: CUoutput_mode, -) -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuProfilerStart() -> CUresult { - r#impl::unimplemented() -} - -#[cfg_attr(not(test), no_mangle)] -pub extern "system" fn cuProfilerStop() -> CUresult { - r#impl::unimplemented() -} diff --git a/zluda/src/impl/context.rs b/zluda/src/impl/context.rs index fffceb82..973febcf 100644 --- a/zluda/src/impl/context.rs +++ b/zluda/src/impl/context.rs @@ -1,24 +1,93 @@ -use std::ptr; +use super::{driver, FromCuda, ZludaObject}; +use cuda_types::*; +use hip_runtime_sys::*; +use rustc_hash::FxHashSet; +use std::{cell::RefCell, ptr, sync::Mutex}; -use crate::cuda::CUlimit; -use crate::cuda::CUresult; +thread_local! 
{
+    pub(crate) static CONTEXT_STACK: RefCell<Vec<(CUcontext, hipDevice_t)>> = RefCell::new(Vec::new());
+}
+
+pub(crate) struct Context {
+    pub(crate) device: hipDevice_t,
+    pub(crate) mutable: Mutex<OwnedByContext>,
+}
+
+pub(crate) struct OwnedByContext {
+    pub(crate) ref_count: usize, // only used by primary context
+    pub(crate) _memory: FxHashSet<hipDeviceptr_t>,
+    pub(crate) _streams: FxHashSet<hipStream_t>,
+    pub(crate) _modules: FxHashSet<CUmodule>,
+}
-pub(crate) unsafe fn get_limit(pvalue: *mut usize, limit: CUlimit) -> CUresult {
-    if pvalue == ptr::null_mut() {
-        return CUresult::CUDA_ERROR_INVALID_VALUE;
+impl ZludaObject for Context {
+    const COOKIE: usize = 0x5f867c6d9cb73315;
+
+    type CudaHandle = CUcontext;
+
+    fn drop_checked(&mut self) -> CUresult {
+        Ok(())
     }
-    if limit == CUlimit::CU_LIMIT_STACK_SIZE {
-        *pvalue = 512; // GTX 1060 reports 1024
-        CUresult::CUDA_SUCCESS
-    } else {
-        CUresult::CUDA_ERROR_NOT_SUPPORTED
+}
+
+pub(crate) fn new(device: hipDevice_t) -> Context {
+    Context {
+        device,
+        mutable: Mutex::new(OwnedByContext {
+            ref_count: 0,
+            _memory: FxHashSet::default(),
+            _streams: FxHashSet::default(),
+            _modules: FxHashSet::default(),
+        }),
     }
 }
-pub(crate) fn set_limit(limit: CUlimit, value: usize) -> CUresult {
-    if limit == CUlimit::CU_LIMIT_STACK_SIZE {
-        CUresult::CUDA_SUCCESS
+pub(crate) unsafe fn get_limit(pvalue: *mut usize, limit: hipLimit_t) -> hipError_t {
+    unsafe { hipDeviceGetLimit(pvalue, limit) }
+}
+
+pub(crate) fn set_limit(limit: hipLimit_t, value: usize) -> hipError_t {
+    unsafe { hipDeviceSetLimit(limit, value) }
+}
+
+pub(crate) fn synchronize() -> hipError_t {
+    unsafe { hipDeviceSynchronize() }
+}
+
+pub(crate) fn get_primary(hip_dev: hipDevice_t) -> Result<(&'static Context, CUcontext), CUerror> {
+    let dev = driver::device(hip_dev)?;
+    Ok(dev.primary_context())
+}
+
+pub(crate) fn set_current(raw_ctx: CUcontext) -> CUresult {
+    let new_device = if raw_ctx.0 == ptr::null_mut() {
+        CONTEXT_STACK.with(|stack| {
+            let mut stack = stack.borrow_mut();
+            if let Some((_, old_device)) = stack.pop() {
+                if let Some((_, new_device)) = stack.last() {
+                    if old_device != *new_device {
+                        return Some(*new_device);
+                    }
+                }
+            }
+            None
+        })
     } else {
-        CUresult::CUDA_ERROR_NOT_SUPPORTED
+        let ctx: &Context = FromCuda::from_cuda(&raw_ctx)?;
+        let device = ctx.device;
+        CONTEXT_STACK.with(move |stack| {
+            let mut stack = stack.borrow_mut();
+            let last_device = stack.last().map(|(_, dev)| *dev);
+            stack.push((raw_ctx, device));
+            match last_device {
+                None => Some(device),
+                Some(last_device) if last_device != device => Some(device),
+                _ => None,
+            }
+        })
+    };
+    if let Some(dev) = new_device {
+        unsafe { hipSetDevice(dev)?
}; } + Ok(()) } diff --git a/zluda/src/impl/device.rs b/zluda/src/impl/device.rs index 0c63494c..8836c1ee 100644 --- a/zluda/src/impl/device.rs +++ b/zluda/src/impl/device.rs @@ -1,29 +1,27 @@ -use super::{transmute_lifetime, transmute_lifetime_mut, CUresult}; -use crate::{ - cuda::{self, CUdevice, CUdevprop}, - hip_call, -}; -use cuda::{CUdevice_attribute, CUuuid_st}; -use hip_runtime_sys::{ - hipDeviceAttribute_t, hipDeviceGetAttribute, hipError_t, hipGetDeviceProperties, -}; -use ocl_core::{ClDeviceIdPtr, ContextProperties, DeviceType}; -use paste::paste; -use std::{ - cmp, - collections::HashSet, - ffi::c_void, - mem, - os::raw::{c_char, c_int, c_uint}, - ptr, - sync::atomic::{AtomicU32, Ordering}, -}; +use cuda_types::*; +use hip_runtime_sys::*; +use std::{mem, ptr}; -const PROJECT_URL_SUFFIX_SHORT: &'static str = " [ZLUDA]"; -const PROJECT_URL_SUFFIX_LONG: &'static str = " [github.com/vosen/ZLUDA]"; +use super::context; + +const PROJECT_SUFFIX: &[u8] = b" [ZLUDA]\0"; +pub const COMPUTE_CAPABILITY_MAJOR: i32 = 8; +pub const COMPUTE_CAPABILITY_MINOR: i32 = 8; + +pub(crate) fn compute_capability(major: &mut i32, minor: &mut i32, _dev: hipDevice_t) -> CUresult { + *major = COMPUTE_CAPABILITY_MAJOR; + *minor = COMPUTE_CAPABILITY_MINOR; + Ok(()) +} + +pub(crate) fn get(device: *mut hipDevice_t, ordinal: i32) -> hipError_t { + unsafe { hipDeviceGet(device, ordinal) } +} #[allow(warnings)] -trait hipDeviceAttribute_t_ext { +trait DeviceAttributeNames { + const hipDeviceAttributeGpuOverlap: hipDeviceAttribute_t = + hipDeviceAttribute_t::hipDeviceAttributeDeviceOverlap; const hipDeviceAttributeMaximumTexture1DWidth: hipDeviceAttribute_t = hipDeviceAttribute_t::hipDeviceAttributeMaxTexture1DWidth; const hipDeviceAttributeMaximumTexture2DWidth: hipDeviceAttribute_t = @@ -42,307 +40,300 @@ trait hipDeviceAttribute_t_ext { hipDeviceAttribute_t::hipDeviceAttributeMaxThreadsPerMultiProcessor; const hipDeviceAttributeAsyncEngineCount: hipDeviceAttribute_t = hipDeviceAttribute_t::hipDeviceAttributeConcurrentKernels; + const hipDeviceAttributePciDomainId: hipDeviceAttribute_t = + hipDeviceAttribute_t::hipDeviceAttributePciDomainID; + const hipDeviceAttributeMultiGpuBoard: hipDeviceAttribute_t = + hipDeviceAttribute_t::hipDeviceAttributeIsMultiGpuBoard; + const hipDeviceAttributeMultiGpuBoardGroupId: hipDeviceAttribute_t = + hipDeviceAttribute_t::hipDeviceAttributeMultiGpuBoardGroupID; + const hipDeviceAttributeMaxSharedMemoryPerBlockOptin: hipDeviceAttribute_t = + hipDeviceAttribute_t::hipDeviceAttributeSharedMemPerBlockOptin; } -impl hipDeviceAttribute_t_ext for hipDeviceAttribute_t {} +impl DeviceAttributeNames for hipDeviceAttribute_t {} macro_rules! remap_attribute { ($attrib:expr => $([ $($word:expr)* ]),*,) => { match $attrib { $( - paste! { CUdevice_attribute:: [< CU_DEVICE_ATTRIBUTE $(_ $word:upper)* >] } => { - paste! { hipDeviceAttribute_t:: [< hipDeviceAttribute $($word:camel)* >] } + paste::paste! { CUdevice_attribute:: [< CU_DEVICE_ATTRIBUTE $(_ $word:upper)* >] } => { + paste::paste! 
{ hipDeviceAttribute_t:: [< hipDeviceAttribute $($word:camel)* >] } } )* - _ => return hipError_t::hipErrorInvalidValue + _ => return Err(hipErrorCode_t::NotSupported) } } } -pub fn get_attribute(pi: *mut i32, attrib: CUdevice_attribute, dev_idx: c_int) -> hipError_t { - if pi == ptr::null_mut() { - return hipError_t::hipErrorInvalidValue; - } - //let mut props = unsafe { mem::zeroed() }; - let hip_attrib = match attrib { - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT => { - unsafe { *pi = 1 }; - return hipError_t::hipSuccess; +pub(crate) fn get_attribute( + pi: &mut i32, + attrib: CUdevice_attribute, + dev_idx: hipDevice_t, +) -> hipError_t { + match attrib { + CUdevice_attribute::CU_DEVICE_ATTRIBUTE_WARP_SIZE => { + *pi = 32; + return Ok(()); } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED => { - unsafe { *pi = 1 }; - return hipError_t::hipSuccess; - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_TCC_DRIVER - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID => { - unsafe { *pi = 0 }; - return hipError_t::hipSuccess; + CUdevice_attribute::CU_DEVICE_ATTRIBUTE_TCC_DRIVER => { + *pi = 0; + return Ok(()); } CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR => { - unsafe { *pi = 8 }; - return hipError_t::hipSuccess; + *pi = COMPUTE_CAPABILITY_MAJOR; + return Ok(()); } CUdevice_attribute::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR => { - unsafe { *pi = 0 }; - return hipError_t::hipSuccess; - } - // we assume that arrayed texts have the same limits - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DHeight - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture1DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH => { - 
hipDeviceAttribute_t::hipDeviceAttributeMaxTexture1DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DHeight - } - // we treat surface the same as texture - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT => { - hipDeviceAttribute_t::hipDeviceAttributeTextureAlignment - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture1DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DHeight - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture3DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture3DHeight - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture3DDepth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DWidth - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture2DHeight - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH => { - hipDeviceAttribute_t::hipDeviceAttributeMaxTexture1DWidth - } - // Totally made up - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS - | CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS => { - unsafe { *pi = u16::MAX as i32 }; - return hipError_t::hipSuccess; + *pi = COMPUTE_CAPABILITY_MINOR; + return Ok(()); } - // linear sizes - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH => { - let mut prop = unsafe { mem::zeroed() }; - let err = unsafe { hipGetDeviceProperties(&mut prop, dev_idx) }; - if err != hipError_t::hipSuccess { - return err; - } - unsafe { *pi = prop.maxTexture1DLinear }; - return hipError_t::hipSuccess; - } - CUdevice_attribute::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID => { - let mut prop = unsafe { mem::zeroed() }; - let err = unsafe { hipGetDeviceProperties(&mut prop, dev_idx) }; - if err != hipError_t::hipSuccess { - return err; - } - unsafe { *pi = prop.pciDomainID }; - return hipError_t::hipSuccess; - } - attrib => remap_attribute! 
{ - attrib => - [MAX THREADS PER BLOCK], - [MAX BLOCK DIM X], - [MAX BLOCK DIM Y], - [MAX BLOCK DIM Z], - [MAX GRID DIM X], - [MAX GRID DIM Y], - [MAX GRID DIM Z], - [MAX SHARED MEMORY PER BLOCK], - [TOTAL CONSTANT MEMORY], - [WARP SIZE], - [MAX PITCH], - [MAX REGISTERS PER BLOCK], - [CLOCK RATE], - [TEXTURE ALIGNMENT], - //[GPU OVERLAP], - [MULTIPROCESSOR COUNT], - [KERNEL EXEC TIMEOUT], - [INTEGRATED], - [CAN MAP HOST MEMORY], - [COMPUTE MODE], - [MAXIMUM TEXTURE1D WIDTH], - [MAXIMUM TEXTURE2D WIDTH], - [MAXIMUM TEXTURE2D HEIGHT], - [MAXIMUM TEXTURE3D WIDTH], - [MAXIMUM TEXTURE3D HEIGHT], - [MAXIMUM TEXTURE3D DEPTH], - //[MAXIMUM TEXTURE2D LAYERED WIDTH], - //[MAXIMUM TEXTURE2D LAYERED HEIGHT], - //[MAXIMUM TEXTURE2D LAYERED LAYERS], - //[MAXIMUM TEXTURE2D ARRAY WIDTH], - //[MAXIMUM TEXTURE2D ARRAY HEIGHT], - //[MAXIMUM TEXTURE2D ARRAY NUMSLICES], - //[SURFACE ALIGNMENT], - [CONCURRENT KERNELS], - [ECC ENABLED], - [PCI BUS ID], - [PCI DEVICE ID], - //[TCC DRIVER], - [MEMORY CLOCK RATE], - [GLOBAL MEMORY BUS WIDTH], - [L2 CACHE SIZE], - [MAX THREADS PER MULTIPROCESSOR], - [ASYNC ENGINE COUNT], - //[UNIFIED ADDRESSING], - //[MAXIMUM TEXTURE1D LAYERED WIDTH], - //[MAXIMUM TEXTURE1D LAYERED LAYERS], - //[CAN TEX2D GATHER], - //[MAXIMUM TEXTURE2D GATHER WIDTH], - //[MAXIMUM TEXTURE2D GATHER HEIGHT], - //[MAXIMUM TEXTURE3D WIDTH ALTERNATE], - //[MAXIMUM TEXTURE3D HEIGHT ALTERNATE], - //[MAXIMUM TEXTURE3D DEPTH ALTERNATE], - //[PCI DOMAIN ID], - [TEXTURE PITCH ALIGNMENT], - //[MAXIMUM TEXTURECUBEMAP WIDTH], - //[MAXIMUM TEXTURECUBEMAP LAYERED WIDTH], - //[MAXIMUM TEXTURECUBEMAP LAYERED LAYERS], - //[MAXIMUM SURFACE1D WIDTH], - //[MAXIMUM SURFACE2D WIDTH], - //[MAXIMUM SURFACE2D HEIGHT], - //[MAXIMUM SURFACE3D WIDTH], - //[MAXIMUM SURFACE3D HEIGHT], - //[MAXIMUM SURFACE3D DEPTH], - //[MAXIMUM SURFACE1D LAYERED WIDTH], - //[MAXIMUM SURFACE1D LAYERED LAYERS], - //[MAXIMUM SURFACE2D LAYERED WIDTH], - //[MAXIMUM SURFACE2D LAYERED HEIGHT], - //[MAXIMUM SURFACE2D LAYERED LAYERS], - //[MAXIMUM SURFACECUBEMAP WIDTH], - //[MAXIMUM SURFACECUBEMAP LAYERED WIDTH], - //[MAXIMUM SURFACECUBEMAP LAYERED LAYERS], - //[MAXIMUM TEXTURE1D LINEAR WIDTH], - //[MAXIMUM TEXTURE2D LINEAR WIDTH], - //[MAXIMUM TEXTURE2D LINEAR HEIGHT], - //[MAXIMUM TEXTURE2D LINEAR PITCH], - //[MAXIMUM TEXTURE2D MIPMAPPED WIDTH], - //[MAXIMUM TEXTURE2D MIPMAPPED HEIGHT], - //[COMPUTE CAPABILITY MAJOR], - //[COMPUTE CAPABILITY MINOR], - //[MAXIMUM TEXTURE1D MIPMAPPED WIDTH], - //[STREAM PRIORITIES SUPPORTED], - //[GLOBAL L1 CACHE SUPPORTED], - //[LOCAL L1 CACHE SUPPORTED], - [MAX SHARED MEMORY PER MULTIPROCESSOR], - //[MAX REGISTERS PER MULTIPROCESSOR], - [MANAGED MEMORY], - //[MULTI GPU BOARD], - //[MULTI GPU BOARD GROUP ID], - //[HOST NATIVE ATOMIC SUPPORTED], - //[SINGLE TO DOUBLE PRECISION PERF RATIO], - [PAGEABLE MEMORY ACCESS], - [CONCURRENT MANAGED ACCESS], - //[COMPUTE PREEMPTION SUPPORTED], - //[CAN USE HOST POINTER FOR REGISTERED MEM], - //[CAN USE STREAM MEM OPS], - //[CAN USE 64 BIT STREAM MEM OPS], - //[CAN USE STREAM WAIT VALUE NOR], - [COOPERATIVE LAUNCH], - [COOPERATIVE MULTI DEVICE LAUNCH], - //[MAX SHARED MEMORY PER BLOCK OPTIN], - //[CAN FLUSH REMOTE WRITES], - //[HOST REGISTER SUPPORTED], - [PAGEABLE MEMORY ACCESS USES HOST PAGE TABLES], - [DIRECT MANAGED MEM ACCESS FROM HOST], - //[VIRTUAL ADDRESS MANAGEMENT SUPPORTED], - //[VIRTUAL MEMORY MANAGEMENT SUPPORTED], - //[HANDLE TYPE POSIX FILE DESCRIPTOR SUPPORTED], - //[HANDLE TYPE WIN32 HANDLE SUPPORTED], - //[HANDLE TYPE WIN32 KMT HANDLE SUPPORTED], - //[MAX BLOCKS PER 
MULTIPROCESSOR], - //[GENERIC COMPRESSION SUPPORTED], - //[MAX PERSISTING L2 CACHE SIZE], - //[MAX ACCESS POLICY WINDOW SIZE], - //[GPU DIRECT RDMA WITH CUDA VMM SUPPORTED], - //[RESERVED SHARED MEMORY PER BLOCK], - //[SPARSE CUDA ARRAY SUPPORTED], - //[READ ONLY HOST REGISTER SUPPORTED], - //[TIMELINE SEMAPHORE INTEROP SUPPORTED], - //[MEMORY POOLS SUPPORTED], - }, + _ => {} + } + let attrib = remap_attribute! { + attrib => + [MAX THREADS PER BLOCK], + [MAX BLOCK DIM X], + [MAX BLOCK DIM Y], + [MAX BLOCK DIM Z], + [MAX GRID DIM X], + [MAX GRID DIM Y], + [MAX GRID DIM Z], + [MAX SHARED MEMORY PER BLOCK], + [TOTAL CONSTANT MEMORY], + //[WARP SIZE], + [MAX PITCH], + [MAX REGISTERS PER BLOCK], + [CLOCK RATE], + [TEXTURE ALIGNMENT], + [GPU OVERLAP], + [MULTIPROCESSOR COUNT], + [KERNEL EXEC TIMEOUT], + [INTEGRATED], + [CAN MAP HOST MEMORY], + [COMPUTE MODE], + [MAXIMUM TEXTURE1D WIDTH], + [MAXIMUM TEXTURE2D WIDTH], + [MAXIMUM TEXTURE2D HEIGHT], + [MAXIMUM TEXTURE3D WIDTH], + [MAXIMUM TEXTURE3D HEIGHT], + [MAXIMUM TEXTURE3D DEPTH], + //[MAXIMUM TEXTURE2D LAYERED WIDTH], + //[MAXIMUM TEXTURE2D LAYERED HEIGHT], + //[MAXIMUM TEXTURE2D LAYERED LAYERS], + //[MAXIMUM TEXTURE2D ARRAY WIDTH], + //[MAXIMUM TEXTURE2D ARRAY HEIGHT], + //[MAXIMUM TEXTURE2D ARRAY NUMSLICES], + [SURFACE ALIGNMENT], + [CONCURRENT KERNELS], + [ECC ENABLED], + [PCI BUS ID], + [PCI DEVICE ID], + //[TCC DRIVER], + [MEMORY CLOCK RATE], + [GLOBAL MEMORY BUS WIDTH], + [L2 CACHE SIZE], + [MAX THREADS PER MULTIPROCESSOR], + [ASYNC ENGINE COUNT], + [UNIFIED ADDRESSING], + //[MAXIMUM TEXTURE1D LAYERED WIDTH], + //[MAXIMUM TEXTURE1D LAYERED LAYERS], + //[CAN TEX2D GATHER], + //[MAXIMUM TEXTURE2D GATHER WIDTH], + //[MAXIMUM TEXTURE2D GATHER HEIGHT], + //[MAXIMUM TEXTURE3D WIDTH ALTERNATE], + //[MAXIMUM TEXTURE3D HEIGHT ALTERNATE], + //[MAXIMUM TEXTURE3D DEPTH ALTERNATE], + [PCI DOMAIN ID], + [TEXTURE PITCH ALIGNMENT], + //[MAXIMUM TEXTURECUBEMAP WIDTH], + //[MAXIMUM TEXTURECUBEMAP LAYERED WIDTH], + //[MAXIMUM TEXTURECUBEMAP LAYERED LAYERS], + //[MAXIMUM SURFACE1D WIDTH], + //[MAXIMUM SURFACE2D WIDTH], + //[MAXIMUM SURFACE2D HEIGHT], + //[MAXIMUM SURFACE3D WIDTH], + //[MAXIMUM SURFACE3D HEIGHT], + //[MAXIMUM SURFACE3D DEPTH], + //[MAXIMUM SURFACE1D LAYERED WIDTH], + //[MAXIMUM SURFACE1D LAYERED LAYERS], + //[MAXIMUM SURFACE2D LAYERED WIDTH], + //[MAXIMUM SURFACE2D LAYERED HEIGHT], + //[MAXIMUM SURFACE2D LAYERED LAYERS], + //[MAXIMUM SURFACECUBEMAP WIDTH], + //[MAXIMUM SURFACECUBEMAP LAYERED WIDTH], + //[MAXIMUM SURFACECUBEMAP LAYERED LAYERS], + //[MAXIMUM TEXTURE1D LINEAR WIDTH], + //[MAXIMUM TEXTURE2D LINEAR WIDTH], + //[MAXIMUM TEXTURE2D LINEAR HEIGHT], + //[MAXIMUM TEXTURE2D LINEAR PITCH], + //[MAXIMUM TEXTURE2D MIPMAPPED WIDTH], + //[MAXIMUM TEXTURE2D MIPMAPPED HEIGHT], + //[COMPUTE CAPABILITY MAJOR], + //[COMPUTE CAPABILITY MINOR], + //[MAXIMUM TEXTURE1D MIPMAPPED WIDTH], + [STREAM PRIORITIES SUPPORTED], + [GLOBAL L1 CACHE SUPPORTED], + [LOCAL L1 CACHE SUPPORTED], + [MAX SHARED MEMORY PER MULTIPROCESSOR], + [MAX REGISTERS PER MULTIPROCESSOR], + [MANAGED MEMORY], + [MULTI GPU BOARD], + [MULTI GPU BOARD GROUP ID], + [HOST NATIVE ATOMIC SUPPORTED], + [SINGLE TO DOUBLE PRECISION PERF RATIO], + [PAGEABLE MEMORY ACCESS], + [CONCURRENT MANAGED ACCESS], + [COMPUTE PREEMPTION SUPPORTED], + [CAN USE HOST POINTER FOR REGISTERED MEM], + //[CAN USE STREAM MEM OPS], + [COOPERATIVE LAUNCH], + [COOPERATIVE MULTI DEVICE LAUNCH], + [MAX SHARED MEMORY PER BLOCK OPTIN], + //[CAN FLUSH REMOTE WRITES], + [HOST REGISTER SUPPORTED], + [PAGEABLE MEMORY ACCESS 
USES HOST PAGE TABLES], + [DIRECT MANAGED MEM ACCESS FROM HOST], + //[VIRTUAL ADDRESS MANAGEMENT SUPPORTED], + [VIRTUAL MEMORY MANAGEMENT SUPPORTED], + //[HANDLE TYPE POSIX FILE DESCRIPTOR SUPPORTED], + //[HANDLE TYPE WIN32 HANDLE SUPPORTED], + //[HANDLE TYPE WIN32 KMT HANDLE SUPPORTED], + //[MAX BLOCKS PER MULTIPROCESSOR], + //[GENERIC COMPRESSION SUPPORTED], + //[MAX PERSISTING L2 CACHE SIZE], + //[MAX ACCESS POLICY WINDOW SIZE], + //[GPU DIRECT RDMA WITH CUDA VMM SUPPORTED], + //[RESERVED SHARED MEMORY PER BLOCK], + //[SPARSE CUDA ARRAY SUPPORTED], + //[READ ONLY HOST REGISTER SUPPORTED], + //[TIMELINE SEMAPHORE INTEROP SUPPORTED], + [MEMORY POOLS SUPPORTED], + //[GPU DIRECT RDMA SUPPORTED], + //[GPU DIRECT RDMA FLUSH WRITES OPTIONS], + //[GPU DIRECT RDMA WRITES ORDERING], + //[MEMPOOL SUPPORTED HANDLE TYPES], + //[CLUSTER LAUNCH], + //[DEFERRED MAPPING CUDA ARRAY SUPPORTED], + //[CAN USE 64 BIT STREAM MEM OPS], + //[CAN USE STREAM WAIT VALUE NOR], + //[DMA BUF SUPPORTED], + //[IPC EVENT SUPPORTED], + //[MEM SYNC DOMAIN COUNT], + //[TENSOR MAP ACCESS SUPPORTED], + //[HANDLE TYPE FABRIC SUPPORTED], + //[UNIFIED FUNCTION POINTERS], + //[NUMA CONFIG], + //[NUMA ID], + //[MULTICAST SUPPORTED], + //[MPS ENABLED], + //[HOST NUMA ID], }; - unsafe { hipDeviceGetAttribute(pi, hip_attrib, dev_idx) } + unsafe { hipDeviceGetAttribute(pi, attrib, dev_idx) } +} + +pub(crate) fn get_uuid(uuid: *mut hipUUID, device: hipDevice_t) -> hipError_t { + unsafe { hipDeviceGetUuid(uuid, device) } +} + +pub(crate) fn get_uuid_v2(uuid: *mut hipUUID, device: hipDevice_t) -> hipError_t { + get_uuid(uuid, device) } -pub fn get_uuid(uuid: *mut CUuuid_st, _dev_idx: c_int) -> Result<(), CUresult> { +pub(crate) fn get_luid( + luid: *mut ::core::ffi::c_char, + device_node_mask: &mut ::core::ffi::c_uint, + dev: hipDevice_t, +) -> hipError_t { + let luid = unsafe { + luid.cast::<[i8; 8]>() + .as_mut() + .ok_or(hipErrorCode_t::InvalidValue) + }?; + let mut properties = unsafe { mem::zeroed() }; + unsafe { hipGetDevicePropertiesR0600(&mut properties, dev) }?; + *luid = properties.luid; + *device_node_mask = properties.luidDeviceNodeMask; + Ok(()) +} + +pub(crate) fn get_name( + name: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + dev: hipDevice_t, +) -> cuda_types::CUresult { + unsafe { hipDeviceGetName(name, len, dev) }?; + let len = len as usize; + let buffer = unsafe { std::slice::from_raw_parts(name, len) }; + let first_zero = buffer.iter().position(|c| *c == 0); + let first_zero = if let Some(x) = first_zero { + x + } else { + return Ok(()); + }; + if (first_zero + PROJECT_SUFFIX.len()) > len { + return Ok(()); + } unsafe { - *uuid = CUuuid_st { - bytes: mem::zeroed(), - } + ptr::copy_nonoverlapping( + PROJECT_SUFFIX.as_ptr() as _, + name.add(first_zero), + PROJECT_SUFFIX.len(), + ) }; Ok(()) } -// TODO: add support if Level 0 exposes it -pub fn get_luid( - luid: *mut c_char, - dev_node_mask: *mut c_uint, - _dev_idx: c_int, -) -> Result<(), CUresult> { - unsafe { ptr::write_bytes(luid, 0u8, 8) }; - unsafe { *dev_node_mask = 0 }; +pub(crate) fn total_mem_v2(bytes: *mut usize, dev: hipDevice_t) -> hipError_t { + unsafe { hipDeviceTotalMem(bytes, dev) } +} + +pub(crate) fn get_properties(prop: &mut cuda_types::CUdevprop, dev: hipDevice_t) -> hipError_t { + let mut hip_props = unsafe { mem::zeroed() }; + unsafe { hipGetDevicePropertiesR0600(&mut hip_props, dev) }?; + prop.maxThreadsPerBlock = hip_props.maxThreadsPerBlock; + prop.maxThreadsDim = hip_props.maxThreadsDim; + prop.maxGridSize = hip_props.maxGridSize; + 
prop.totalConstantMemory = clamp_usize(hip_props.totalConstMem);
+    prop.SIMDWidth = 32;
+    prop.memPitch = clamp_usize(hip_props.memPitch);
+    prop.regsPerBlock = hip_props.regsPerBlock;
+    prop.clockRate = hip_props.clockRate;
+    prop.textureAlign = clamp_usize(hip_props.textureAlignment);
+    Ok(())
+}
+
+pub(crate) fn get_count(count: &mut ::core::ffi::c_int) -> hipError_t {
+    unsafe { hipGetDeviceCount(count) }
+}
+
+fn clamp_usize(x: usize) -> i32 {
+    usize::min(x, i32::MAX as usize) as i32
+}
+
+pub(crate) fn primary_context_retain(
+    pctx: &mut CUcontext,
+    hip_dev: hipDevice_t,
+) -> Result<(), CUerror> {
+    let (ctx, raw_ctx) = context::get_primary(hip_dev)?;
+    {
+        let mut mutable_ctx = ctx.mutable.lock().map_err(|_| CUerror::UNKNOWN)?;
+        mutable_ctx.ref_count += 1;
+    }
+    *pctx = raw_ctx;
     Ok(())
 }
-pub(crate) unsafe fn get_properties(prop: *mut CUdevprop, dev: CUdevice) -> Result<(), hipError_t> {
-    if prop == ptr::null_mut() {
-        return Err(hipError_t::hipErrorInvalidValue);
+pub(crate) fn primary_context_release(hip_dev: hipDevice_t) -> Result<(), CUerror> {
+    let (ctx, _) = context::get_primary(hip_dev)?;
+    {
+        let mut mutable_ctx = ctx.mutable.lock().map_err(|_| CUerror::UNKNOWN)?;
+        if mutable_ctx.ref_count == 0 {
+            return Err(CUerror::INVALID_CONTEXT);
+        }
+        mutable_ctx.ref_count -= 1;
+        if mutable_ctx.ref_count == 0 {
+            // TODO: drop all children
+        }
     }
-    let mut hip_props = mem::zeroed();
-    hip_call! { hipGetDeviceProperties(&mut hip_props, dev.0) };
-    (*prop).maxThreadsPerBlock = hip_props.maxThreadsPerBlock;
-    (*prop).maxThreadsDim = hip_props.maxThreadsDim;
-    (*prop).maxGridSize = hip_props.maxGridSize;
-    (*prop).totalConstantMemory = usize::min(hip_props.totalConstMem, i32::MAX as usize) as i32;
-    (*prop).SIMDWidth = hip_props.warpSize;
-    (*prop).memPitch = usize::min(hip_props.memPitch, i32::MAX as usize) as i32;
-    (*prop).regsPerBlock = hip_props.regsPerBlock;
-    (*prop).clockRate = hip_props.clockRate;
-    (*prop).textureAlign = usize::min(hip_props.textureAlignment, i32::MAX as usize) as i32;
     Ok(())
 }
diff --git a/zluda/src/impl/driver.rs b/zluda/src/impl/driver.rs
new file mode 100644
index 00000000..7ff2f546
--- /dev/null
+++ b/zluda/src/impl/driver.rs
@@ -0,0 +1,79 @@
+use cuda_types::*;
+use hip_runtime_sys::*;
+use std::{
+    ffi::{CStr, CString},
+    mem, slice,
+    sync::OnceLock,
+};
+
+use crate::r#impl::context;
+
+use super::LiveCheck;
+
+pub(crate) struct GlobalState {
+    pub devices: Vec<Device>,
+}
+
+pub(crate) struct Device {
+    pub(crate) _comgr_isa: CString,
+    primary_context: LiveCheck<context::Context>,
+}
+
+impl Device {
+    pub(crate) fn primary_context<'a>(&'a self) -> (&'a context::Context, CUcontext) {
+        unsafe {
+            (
+                self.primary_context.data.assume_init_ref(),
+                self.primary_context.as_handle(),
+            )
+        }
+    }
+}
+
+pub(crate) fn device(dev: i32) -> Result<&'static Device, CUerror> {
+    global_state()?
+        .devices
+        .get(dev as usize)
+        .ok_or(CUerror::INVALID_DEVICE)
+}
+
+pub(crate) fn global_state() -> Result<&'static GlobalState, CUerror> {
+    static GLOBAL_STATE: OnceLock<Result<GlobalState, CUerror>> = OnceLock::new();
+    fn cast_slice<'a>(bytes: &'a [i8]) -> &'a [u8] {
+        unsafe { slice::from_raw_parts(bytes.as_ptr().cast(), bytes.len()) }
+    }
+    GLOBAL_STATE
+        .get_or_init(|| {
+            let mut device_count = 0;
+            unsafe { hipGetDeviceCount(&mut device_count) }?;
+            Ok(GlobalState {
+                devices: (0..device_count)
+                    .map(|i| {
+                        let mut props = unsafe { mem::zeroed() };
+                        unsafe { hipGetDevicePropertiesR0600(&mut props, i) }?;
+                        Ok::<_, CUerror>(Device {
+                            _comgr_isa: CStr::from_bytes_until_nul(cast_slice(
+                                &props.gcnArchName[..],
+                            ))
+                            .map_err(|_| CUerror::UNKNOWN)?
+                            .to_owned(),
+                            primary_context: LiveCheck::new(context::new(i)),
+                        })
+                    })
+                    .collect::<Result<Vec<_>, _>>()?,
+            })
+        })
+        .as_ref()
+        .map_err(|e| *e)
+}
+
+pub(crate) fn init(flags: ::core::ffi::c_uint) -> CUresult {
+    unsafe { hipInit(flags) }?;
+    global_state()?;
+    Ok(())
+}
+
+pub(crate) fn get_version(version: &mut ::core::ffi::c_int) -> CUresult {
+    *version = cuda_types::CUDA_VERSION as i32;
+    Ok(())
+}
diff --git a/zluda/src/impl/function.rs b/zluda/src/impl/function.rs
index 7f35bb4b..8d006ec8 100644
--- a/zluda/src/impl/function.rs
+++ b/zluda/src/impl/function.rs
@@ -1,26 +1,46 @@
-use hip_runtime_sys::{hipError_t, hipFuncAttribute, hipFuncGetAttribute, hipFuncGetAttributes, hipFunction_attribute, hipLaunchKernel, hipModuleLaunchKernel};
-
-use super::{CUresult, HasLivenessCookie, LiveCheck};
-use crate::cuda::{CUfunction, CUfunction_attribute, CUstream};
-use ::std::os::raw::{c_uint, c_void};
-use std::{mem, ptr};
+use hip_runtime_sys::*;
 pub(crate) fn get_attribute(
-    pi: *mut i32,
-    cu_attrib: CUfunction_attribute,
-    func: CUfunction,
+    pi: &mut i32,
+    cu_attrib: hipFunction_attribute,
+    func: hipFunction_t,
+) -> hipError_t {
+    // TODO: implement HIP_FUNC_ATTRIBUTE_PTX_VERSION
+    // TODO: implement HIP_FUNC_ATTRIBUTE_BINARY_VERSION
+    unsafe { hipFuncGetAttribute(pi, cu_attrib, func) }?;
+    if cu_attrib == hipFunction_attribute::HIP_FUNC_ATTRIBUTE_NUM_REGS {
+        *pi = (*pi).max(1);
+    }
+    Ok(())
+}
+
+pub(crate) fn launch_kernel(
+    f: hipFunction_t,
+    grid_dim_x: ::core::ffi::c_uint,
+    grid_dim_y: ::core::ffi::c_uint,
+    grid_dim_z: ::core::ffi::c_uint,
+    block_dim_x: ::core::ffi::c_uint,
+    block_dim_y: ::core::ffi::c_uint,
+    block_dim_z: ::core::ffi::c_uint,
+    shared_mem_bytes: ::core::ffi::c_uint,
+    stream: hipStream_t,
+    kernel_params: *mut *mut ::core::ffi::c_void,
+    extra: *mut *mut ::core::ffi::c_void,
 ) -> hipError_t {
-    if pi == ptr::null_mut() || func == ptr::null_mut() {
-        return hipError_t::hipErrorInvalidValue;
+    // TODO: fix constants in extra
+    unsafe {
+        hipModuleLaunchKernel(
+            f,
+            grid_dim_x,
+            grid_dim_y,
+            grid_dim_z,
+            block_dim_x,
+            block_dim_y,
+            block_dim_z,
+            shared_mem_bytes,
+            stream,
+            kernel_params,
+            extra,
+        )
     }
-    let attrib = match cu_attrib {
-        CUfunction_attribute::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK => {
-            hipFunction_attribute::HIP_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK
-        }
-        CUfunction_attribute::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES => {
-            hipFunction_attribute::HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES
-        }
-        _ => return hipError_t::hipErrorInvalidValue,
-    };
-    unsafe { hipFuncGetAttribute(pi, attrib, func as _) }
 }
diff --git a/zluda/src/impl/link.rs b/zluda/src/impl/link.rs
deleted file mode 100644
index d66608f5..00000000
--- a/zluda/src/impl/link.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-use std::{
-    ffi::{c_void, CStr},
-    mem,
ptr, slice, -}; - -use hip_runtime_sys::{hipCtxGetDevice, hipError_t, hipGetDeviceProperties}; - -use crate::{ - cuda::{CUjitInputType, CUjit_option, CUlinkState, CUresult}, - hip_call, -}; - -use super::module::{self, SpirvModule}; - -struct LinkState { - modules: Vec, - result: Option>, -} - -pub(crate) unsafe fn create( - num_options: u32, - options: *mut CUjit_option, - option_values: *mut *mut c_void, - state_out: *mut CUlinkState, -) -> CUresult { - if state_out == ptr::null_mut() { - return CUresult::CUDA_ERROR_INVALID_VALUE; - } - let state = Box::new(LinkState { - modules: Vec::new(), - result: None, - }); - *state_out = mem::transmute(state); - CUresult::CUDA_SUCCESS -} - -pub(crate) unsafe fn add_data( - state: CUlinkState, - type_: CUjitInputType, - data: *mut c_void, - size: usize, - name: *const i8, - num_options: u32, - options: *mut CUjit_option, - option_values: *mut *mut c_void, -) -> Result<(), hipError_t> { - if state == ptr::null_mut() { - return Err(hipError_t::hipErrorInvalidValue); - } - let state: *mut LinkState = mem::transmute(state); - let state = &mut *state; - // V-RAY specific hack - if state.modules.len() == 2 { - return Err(hipError_t::hipSuccess); - } - let spirv_data = SpirvModule::new_raw(data as *const _)?; - state.modules.push(spirv_data); - Ok(()) -} - -pub(crate) unsafe fn complete( - state: CUlinkState, - cubin_out: *mut *mut c_void, - size_out: *mut usize, -) -> Result<(), hipError_t> { - let mut dev = 0; - hip_call! { hipCtxGetDevice(&mut dev) }; - let mut props = unsafe { mem::zeroed() }; - hip_call! { hipGetDeviceProperties(&mut props, dev) }; - let state: &mut LinkState = mem::transmute(state); - let spirv_bins = state.modules.iter().map(|m| &m.binaries[..]); - let should_link_ptx_impl = state.modules.iter().find_map(|m| m.should_link_ptx_impl); - let mut arch_binary = module::compile_amd(&props, spirv_bins, should_link_ptx_impl) - .map_err(|_| hipError_t::hipErrorUnknown)?; - let ptr = arch_binary.as_mut_ptr(); - let size = arch_binary.len(); - state.result = Some(arch_binary); - *cubin_out = ptr as _; - *size_out = size; - Ok(()) -} - -pub(crate) unsafe fn destroy(state: CUlinkState) -> CUresult { - let state: Box = mem::transmute(state); - CUresult::CUDA_SUCCESS -} diff --git a/zluda/src/impl/memory.rs b/zluda/src/impl/memory.rs index 60416238..38437760 100644 --- a/zluda/src/impl/memory.rs +++ b/zluda/src/impl/memory.rs @@ -1,55 +1,35 @@ -use hip_runtime_sys::{ - hipDrvMemcpy3D, hipError_t, hipMemcpy3D, hipMemcpy3DParms, hipMemoryType, hipPitchedPtr, - hipPos, HIP_MEMCPY3D, -}; -use std::ptr; +use hip_runtime_sys::*; -use crate::{ - cuda::{CUDA_MEMCPY3D_st, CUdeviceptr, CUmemorytype, CUresult}, - hip_call, -}; +pub(crate) fn alloc_v2(dptr: *mut hipDeviceptr_t, bytesize: usize) -> hipError_t { + unsafe { hipMalloc(dptr.cast(), bytesize) }?; + // TODO: parametrize for non-Geekbench + unsafe { hipMemsetD8(*dptr, 0, bytesize) } +} + +pub(crate) fn free_v2(dptr: hipDeviceptr_t) -> hipError_t { + unsafe { hipFree(dptr.0) } +} + +pub(crate) fn copy_dto_h_v2( + dst_host: *mut ::core::ffi::c_void, + src_device: hipDeviceptr_t, + byte_count: usize, +) -> hipError_t { + unsafe { hipMemcpyDtoH(dst_host, src_device, byte_count) } +} -// TODO change HIP impl to 64 bits -pub(crate) unsafe fn copy_3d(cu_copy: *const CUDA_MEMCPY3D_st) -> Result<(), hipError_t> { - if cu_copy == ptr::null() { - return Err(hipError_t::hipErrorInvalidValue); - } - let cu_copy = *cu_copy; - let hip_copy = HIP_MEMCPY3D { - srcXInBytes: cu_copy.srcXInBytes as u32, - srcY: 
cu_copy.srcY as u32, - srcZ: cu_copy.srcZ as u32, - srcLOD: cu_copy.srcLOD as u32, - srcMemoryType: memory_type(cu_copy.srcMemoryType)?, - srcHost: cu_copy.srcHost, - srcDevice: cu_copy.srcDevice.0 as _, - srcArray: cu_copy.srcArray as _, - srcPitch: cu_copy.srcPitch as u32, - srcHeight: cu_copy.srcHeight as u32, - dstXInBytes: cu_copy.dstXInBytes as u32, - dstY: cu_copy.dstY as u32, - dstZ: cu_copy.dstZ as u32, - dstLOD: cu_copy.dstLOD as u32, - dstMemoryType: memory_type(cu_copy.dstMemoryType)?, - dstHost: cu_copy.dstHost, - dstDevice: cu_copy.dstDevice.0 as _, - dstArray: cu_copy.dstArray as _, - dstPitch: cu_copy.dstPitch as u32, - dstHeight: cu_copy.dstHeight as u32, - WidthInBytes: cu_copy.WidthInBytes as u32, - Height: cu_copy.Height as u32, - Depth: cu_copy.Depth as u32, - }; - hip_call! { hipDrvMemcpy3D(&hip_copy) }; - Ok(()) +pub(crate) fn copy_hto_d_v2( + dst_device: hipDeviceptr_t, + src_host: *const ::core::ffi::c_void, + byte_count: usize, +) -> hipError_t { + unsafe { hipMemcpyHtoD(dst_device, src_host.cast_mut(), byte_count) } } -pub(crate) fn memory_type(cu: CUmemorytype) -> Result { - match cu { - CUmemorytype::CU_MEMORYTYPE_HOST => Ok(hipMemoryType::hipMemoryTypeHost), - CUmemorytype::CU_MEMORYTYPE_DEVICE => Ok(hipMemoryType::hipMemoryTypeDevice), - CUmemorytype::CU_MEMORYTYPE_ARRAY => Ok(hipMemoryType::hipMemoryTypeArray), - CUmemorytype::CU_MEMORYTYPE_UNIFIED => Ok(hipMemoryType::hipMemoryTypeUnified), - _ => Err(hipError_t::hipErrorInvalidValue), - } +pub(crate) fn get_address_range_v2( + pbase: *mut hipDeviceptr_t, + psize: *mut usize, + dptr: hipDeviceptr_t, +) -> hipError_t { + unsafe { hipMemGetAddressRange(pbase, psize, dptr) } } diff --git a/zluda/src/impl/mod.rs b/zluda/src/impl/mod.rs index 1335ef66..766b4a52 100644 --- a/zluda/src/impl/mod.rs +++ b/zluda/src/impl/mod.rs @@ -1,230 +1,209 @@ -use hip_runtime_sys::hipError_t; - -use crate::cuda::{CUctx_st, CUdevice, CUdeviceptr, CUfunc_st, CUmod_st, CUresult, CUstream_st}; -use std::{ - ffi::c_void, - mem::{self, ManuallyDrop}, - os::raw::c_int, - ptr, - sync::Mutex, - sync::TryLockError, -}; - -#[cfg(test)] -#[macro_use] -pub mod test; -pub mod device; -pub mod export_table; -pub mod function; -#[cfg_attr(windows, path = "os_win.rs")] -#[cfg_attr(not(windows), path = "os_unix.rs")] -pub(crate) mod os; -pub(crate) mod module; -pub(crate) mod context; -pub(crate) mod memory; -pub(crate) mod link; -pub(crate) mod pointer; +use cuda_types::*; +use hip_runtime_sys::*; +use std::mem::{self, ManuallyDrop, MaybeUninit}; + +pub(super) mod context; +pub(super) mod device; +pub(super) mod driver; +pub(super) mod function; +pub(super) mod memory; +pub(super) mod module; +pub(super) mod pointer; #[cfg(debug_assertions)] -pub fn unimplemented() -> CUresult { +pub(crate) fn unimplemented() -> CUresult { unimplemented!() } #[cfg(not(debug_assertions))] -pub fn unimplemented() -> CUresult { - CUresult::CUDA_ERROR_NOT_SUPPORTED +pub(crate) fn unimplemented() -> CUresult { + CUresult::ERROR_NOT_SUPPORTED } -#[macro_export] -macro_rules! hip_call { - ($expr:expr) => { - #[allow(unused_unsafe)] - { - let err = unsafe { $expr }; - if err != hip_runtime_sys::hipError_t::hipSuccess { - return Result::Err(err); +pub(crate) trait FromCuda<'a, T>: Sized { + fn from_cuda(t: &'a T) -> Result; +} + +macro_rules! 
from_cuda_nop { + ($($type_:ty),*) => { + $( + impl<'a> FromCuda<'a, $type_> for $type_ { + fn from_cuda(x: &'a $type_) -> Result<$type_, CUerror> { + Ok(*x) + } } - } + + impl<'a> FromCuda<'a, *mut $type_> for &'a mut $type_ { + fn from_cuda(x: &'a *mut $type_) -> Result<&'a mut $type_, CUerror> { + match unsafe { x.as_mut() } { + Some(x) => Ok(x), + None => Err(CUerror::INVALID_VALUE), + } + } + } + )* + }; +} + +macro_rules! from_cuda_transmute { + ($($from:ty => $to:ty),*) => { + $( + impl<'a> FromCuda<'a, $from> for $to { + fn from_cuda(x: &'a $from) -> Result<$to, CUerror> { + Ok(unsafe { std::mem::transmute(*x) }) + } + } + + impl<'a> FromCuda<'a, *mut $from> for &'a mut $to { + fn from_cuda(x: &'a *mut $from) -> Result<&'a mut $to, CUerror> { + match unsafe { x.cast::<$to>().as_mut() } { + Some(x) => Ok(x), + None => Err(CUerror::INVALID_VALUE), + } + } + } + + impl<'a> FromCuda<'a, *mut $from> for * mut $to { + fn from_cuda(x: &'a *mut $from) -> Result<*mut $to, CUerror> { + Ok(x.cast::<$to>()) + } + } + )* + }; +} + +macro_rules! from_cuda_object { + ($($type_:ty),*) => { + $( + impl<'a> FromCuda<'a, <$type_ as ZludaObject>::CudaHandle> for <$type_ as ZludaObject>::CudaHandle { + fn from_cuda(handle: &'a <$type_ as ZludaObject>::CudaHandle) -> Result<<$type_ as ZludaObject>::CudaHandle, CUerror> { + Ok(*handle) + } + } + + impl<'a> FromCuda<'a, *mut <$type_ as ZludaObject>::CudaHandle> for &'a mut <$type_ as ZludaObject>::CudaHandle { + fn from_cuda(handle: &'a *mut <$type_ as ZludaObject>::CudaHandle) -> Result<&'a mut <$type_ as ZludaObject>::CudaHandle, CUerror> { + match unsafe { handle.as_mut() } { + Some(x) => Ok(x), + None => Err(CUerror::INVALID_VALUE), + } + } + } + + impl<'a> FromCuda<'a, <$type_ as ZludaObject>::CudaHandle> for &'a $type_ { + fn from_cuda(handle: &'a <$type_ as ZludaObject>::CudaHandle) -> Result<&'a $type_, CUerror> { + Ok(as_ref(handle).as_result()?)
+ } + } + )* }; } -pub trait HasLivenessCookie: Sized { +from_cuda_nop!( + *mut i8, + *mut i32, + *mut usize, + *const ::core::ffi::c_void, + *const ::core::ffi::c_char, + *mut ::core::ffi::c_void, + *mut *mut ::core::ffi::c_void, + i32, + u32, + usize, + cuda_types::CUdevprop, + CUdevice_attribute +); +from_cuda_transmute!( + CUuuid => hipUUID, + CUfunction => hipFunction_t, + CUfunction_attribute => hipFunction_attribute, + CUstream => hipStream_t, + CUpointer_attribute => hipPointer_attribute, + CUdeviceptr_v2 => hipDeviceptr_t +); +from_cuda_object!(module::Module, context::Context); + +impl<'a> FromCuda<'a, CUlimit> for hipLimit_t { + fn from_cuda(limit: &'a CUlimit) -> Result { + Ok(match *limit { + CUlimit::CU_LIMIT_STACK_SIZE => hipLimit_t::hipLimitStackSize, + CUlimit::CU_LIMIT_PRINTF_FIFO_SIZE => hipLimit_t::hipLimitPrintfFifoSize, + CUlimit::CU_LIMIT_MALLOC_HEAP_SIZE => hipLimit_t::hipLimitMallocHeapSize, + _ => return Err(CUerror::NOT_SUPPORTED), + }) + } +} + +pub(crate) trait ZludaObject: Sized + Send + Sync { const COOKIE: usize; - const LIVENESS_FAIL: CUresult; + const LIVENESS_FAIL: CUerror = cuda_types::CUerror::INVALID_VALUE; - fn try_drop(&mut self) -> Result<(), CUresult>; + type CudaHandle: Sized; + + fn drop_checked(&mut self) -> CUresult; + + fn wrap(self) -> Self::CudaHandle { + unsafe { mem::transmute_copy(&LiveCheck::wrap(self)) } + } } -// This struct is a best-effort check if wrapped value has been dropped, -// while it's inherently safe, its use coming from FFI is very unsafe #[repr(C)] -pub struct LiveCheck { +pub(crate) struct LiveCheck { cookie: usize, - data: ManuallyDrop, + data: MaybeUninit, } -impl LiveCheck { - pub fn new(data: T) -> Self { +impl LiveCheck { + fn new(data: T) -> Self { LiveCheck { cookie: T::COOKIE, - data: ManuallyDrop::new(data), + data: MaybeUninit::new(data), } } - fn destroy_impl(this: *mut Self) -> Result<(), CUresult> { - let mut ctx_box = ManuallyDrop::new(unsafe { Box::from_raw(this) }); - ctx_box.try_drop()?; - unsafe { ManuallyDrop::drop(&mut ctx_box) }; - Ok(()) + fn as_handle(&self) -> T::CudaHandle { + unsafe { mem::transmute_copy(&self) } } - unsafe fn ptr_from_inner(this: *mut T) -> *mut Self { - let outer_ptr = (this as *mut u8).sub(mem::size_of::()); - outer_ptr as *mut Self + fn wrap(data: T) -> *mut Self { + Box::into_raw(Box::new(Self::new(data))) } - pub unsafe fn as_ref_unchecked(&self) -> &T { - &self.data - } - - pub fn as_option_mut(&mut self) -> Option<&mut T> { + fn as_result(&self) -> Result<&T, CUerror> { if self.cookie == T::COOKIE { - Some(&mut self.data) - } else { - None - } - } - - pub fn as_result(&self) -> Result<&T, CUresult> { - if self.cookie == T::COOKIE { - Ok(&self.data) - } else { - Err(T::LIVENESS_FAIL) - } - } - - pub fn as_result_mut(&mut self) -> Result<&mut T, CUresult> { - if self.cookie == T::COOKIE { - Ok(&mut self.data) + Ok(unsafe { self.data.assume_init_ref() }) } else { Err(T::LIVENESS_FAIL) } } + // This looks like nonsense, but it's not. 
There are two cases: + // Err(CUerror) -> meaning that the object is invalid, this pointer does not point into valid memory + // Ok(maybe_error) -> meaning that the object is valid, we dropped everything, but there *might* + // an error in the underlying runtime that we want to propagate #[must_use] - pub fn try_drop(&mut self) -> Result<(), CUresult> { + fn drop_checked(&mut self) -> Result, CUerror> { if self.cookie == T::COOKIE { self.cookie = 0; - self.data.try_drop()?; - unsafe { ManuallyDrop::drop(&mut self.data) }; - return Ok(()); - } - Err(T::LIVENESS_FAIL) - } -} - -impl Drop for LiveCheck { - fn drop(&mut self) { - self.cookie = 0; - } -} - -pub trait CudaRepr: Sized { - type Impl: Sized; -} - -impl CudaRepr for *mut T { - type Impl = *mut T::Impl; -} - -pub trait Decuda { - fn decuda(self: Self) -> To; -} - -impl Decuda<*mut T::Impl> for *mut T { - fn decuda(self: Self) -> *mut T::Impl { - self as *mut _ - } -} - -impl From> for CUresult { - fn from(_: TryLockError) -> Self { - CUresult::CUDA_ERROR_ILLEGAL_STATE - } -} - -impl From for CUresult { - fn from(result: ocl_core::Error) -> Self { - match result { - _ => CUresult::CUDA_ERROR_UNKNOWN, - } - } -} - -impl From for CUresult { - fn from(result: hip_runtime_sys::hipError_t) -> Self { - match result { - hip_runtime_sys::hipError_t::hipErrorRuntimeMemory - | hip_runtime_sys::hipError_t::hipErrorRuntimeOther => CUresult::CUDA_ERROR_UNKNOWN, - hip_runtime_sys::hipError_t(e) => CUresult(e), - } - } -} - -pub trait Encuda { - type To: Sized; - fn encuda(self: Self) -> Self::To; -} - -impl Encuda for CUresult { - type To = CUresult; - fn encuda(self: Self) -> Self::To { - self - } -} - -impl Encuda for () { - type To = CUresult; - fn encuda(self: Self) -> Self::To { - CUresult::CUDA_SUCCESS - } -} - -impl, T2: Encuda> Encuda for Result { - type To = CUresult; - fn encuda(self: Self) -> Self::To { - match self { - Ok(e) => e.encuda(), - Err(e) => e.encuda(), + let result = unsafe { self.data.assume_init_mut().drop_checked() }; + unsafe { MaybeUninit::assume_init_drop(&mut self.data) }; + Ok(result) + } else { + Err(T::LIVENESS_FAIL) } } } -impl Encuda for hipError_t { - type To = CUresult; - fn encuda(self: Self) -> Self::To { - self.into() - } -} - -unsafe fn transmute_lifetime<'a, 'b, T: ?Sized>(t: &'a T) -> &'b T { - mem::transmute(t) -} - -unsafe fn transmute_lifetime_mut<'a, 'b, T: ?Sized>(t: &'a mut T) -> &'b mut T { - mem::transmute(t) +pub fn as_ref<'a, T: ZludaObject>( + handle: &'a T::CudaHandle, +) -> &'a ManuallyDrop>> { + unsafe { mem::transmute(handle) } } -pub fn driver_get_version() -> c_int { - i32::max_value() -} - -impl<'a> CudaRepr for CUdeviceptr { - type Impl = *mut c_void; -} - -impl Decuda<*mut c_void> for CUdeviceptr { - fn decuda(self) -> *mut c_void { - self.0 as *mut _ - } +pub fn drop_checked(handle: T::CudaHandle) -> Result<(), CUerror> { + let mut wrapped_object: ManuallyDrop>> = + unsafe { mem::transmute_copy(&handle) }; + let underlying_error = LiveCheck::drop_checked(&mut wrapped_object)?; + unsafe { ManuallyDrop::drop(&mut wrapped_object) }; + underlying_error } diff --git a/zluda/src/impl/module.rs b/zluda/src/impl/module.rs index 24fa88a8..8b19c1bc 100644 --- a/zluda/src/impl/module.rs +++ b/zluda/src/impl/module.rs @@ -1,261 +1,53 @@ -use std::borrow::Cow; -use std::collections::HashMap; -use std::ffi::{CStr, CString}; -use std::fs::File; -use std::io::{self, Read, Write}; -use std::ops::Add; -use std::os::raw::c_char; -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{env, 
fs, iter, mem, ptr, slice}; +use super::ZludaObject; +use cuda_types::*; +use hip_runtime_sys::*; +use std::{ffi::CStr, mem}; -use hip_runtime_sys::{ - hipCtxGetCurrent, hipCtxGetDevice, hipDeviceGetAttribute, hipDeviceGetName, hipDeviceProp_t, - hipError_t, hipGetDeviceProperties, hipGetStreamDeviceId, hipModuleLoadData, -}; -use tempfile::NamedTempFile; - -use crate::cuda::CUmodule; -use crate::hip_call; - -pub struct SpirvModule { - pub binaries: Vec, - pub kernel_info: HashMap, - pub should_link_ptx_impl: Option<(&'static [u8], &'static [u8])>, - pub build_options: CString, +pub(crate) struct Module { + base: hipModule_t, } -impl SpirvModule { - pub fn new_raw<'a>(text: *const c_char) -> Result { - let u8_text = unsafe { CStr::from_ptr(text) }; - let ptx_text = u8_text - .to_str() - .map_err(|_| hipError_t::hipErrorInvalidImage)?; - Self::new(ptx_text) - } +impl ZludaObject for Module { + const COOKIE: usize = 0xe9138bd040487d4a; - pub fn new<'a>(ptx_text: &str) -> Result { - let mut errors = Vec::new(); - let ast = ptx::ModuleParser::new() - .parse(&mut errors, ptx_text) - .map_err(|_| hipError_t::hipErrorInvalidImage)?; - if errors.len() > 0 { - return Err(hipError_t::hipErrorInvalidImage); - } - let spirv_module = - ptx::to_spirv_module(ast).map_err(|_| hipError_t::hipErrorInvalidImage)?; - Ok(SpirvModule { - binaries: spirv_module.assemble(), - kernel_info: spirv_module.kernel_info, - should_link_ptx_impl: spirv_module.should_link_ptx_impl, - build_options: spirv_module.build_options, - }) - } -} + type CudaHandle = CUmodule; -pub(crate) fn load(module: *mut CUmodule, fname: *const i8) -> Result<(), hipError_t> { - let file_name = unsafe { CStr::from_ptr(fname) } - .to_str() - .map_err(|_| hipError_t::hipErrorInvalidValue)?; - let mut file = File::open(file_name).map_err(|_| hipError_t::hipErrorFileNotFound)?; - let mut file_buffer = Vec::new(); - file.read_to_end(&mut file_buffer) - .map_err(|_| hipError_t::hipErrorUnknown)?; - let result = load_data(module, file_buffer.as_ptr() as _); - drop(file_buffer); - result -} - -pub(crate) fn load_data( - module: *mut CUmodule, - image: *const std::ffi::c_void, -) -> Result<(), hipError_t> { - if image == ptr::null() { - return Err(hipError_t::hipErrorInvalidValue); - } - if unsafe { *(image as *const u32) } == 0x464c457f { - return match unsafe { hipModuleLoadData(module as _, image) } { - hipError_t::hipSuccess => Ok(()), - e => Err(e), - }; + fn drop_checked(&mut self) -> CUresult { + unsafe { hipModuleUnload(self.base) }?; + Ok(()) } - let spirv_data = SpirvModule::new_raw(image as *const _)?; - load_data_impl(module, spirv_data) } -pub fn load_data_impl(pmod: *mut CUmodule, spirv_data: SpirvModule) -> Result<(), hipError_t> { +pub(crate) fn load_data(module: &mut CUmodule, image: *const std::ffi::c_void) -> CUresult { + let text = unsafe { CStr::from_ptr(image.cast()) } + .to_str() + .map_err(|_| CUerror::INVALID_VALUE)?; + let ast = ptx_parser::parse_module_checked(text).map_err(|_| CUerror::NO_BINARY_FOR_GPU)?; + let llvm_module = ptx::to_llvm_module(ast).map_err(|_| CUerror::UNKNOWN)?; let mut dev = 0; - hip_call! { hipCtxGetDevice(&mut dev) }; + unsafe { hipCtxGetDevice(&mut dev) }?; let mut props = unsafe { mem::zeroed() }; - hip_call! 
{ hipGetDeviceProperties(&mut props, dev) }; - let arch_binary = compile_amd( - &props, - iter::once(&spirv_data.binaries[..]), - spirv_data.should_link_ptx_impl, + unsafe { hipGetDevicePropertiesR0600(&mut props, dev) }?; + let elf_module = comgr::compile_bitcode( + unsafe { CStr::from_ptr(props.gcnArchName.as_ptr()) }, + &*llvm_module.llvm_ir, + llvm_module.linked_bitcode(), ) - .map_err(|_| hipError_t::hipErrorUnknown)?; - hip_call! { hipModuleLoadData(pmod as _, arch_binary.as_ptr() as _) }; + .map_err(|_| CUerror::UNKNOWN)?; + let mut hip_module = unsafe { mem::zeroed() }; + unsafe { hipModuleLoadData(&mut hip_module, elf_module.as_ptr().cast()) }?; + *module = Module { base: hip_module }.wrap(); Ok(()) } -const LLVM_SPIRV: &'static str = "/home/vosen/amd/llvm-project/build/bin/llvm-spirv"; -const AMDGPU: &'static str = "/opt/rocm/"; -const AMDGPU_TARGET: &'static str = "amdgcn-amd-amdhsa"; -const AMDGPU_BITCODE: [&'static str; 8] = [ - "opencl.bc", - "ocml.bc", - "ockl.bc", - "oclc_correctly_rounded_sqrt_off.bc", - "oclc_daz_opt_on.bc", - "oclc_finite_only_off.bc", - "oclc_unsafe_math_off.bc", - "oclc_wavefrontsize64_off.bc", -]; -const AMDGPU_BITCODE_DEVICE_PREFIX: &'static str = "oclc_isa_version_"; - -pub(crate) fn compile_amd<'a>( - device_pros: &hipDeviceProp_t, - spirv_il: impl Iterator, - ptx_lib: Option<(&'static [u8], &'static [u8])>, -) -> io::Result> { - let null_terminator = device_pros - .gcnArchName - .iter() - .position(|&x| x == 0) - .unwrap(); - let gcn_arch_slice = unsafe { - slice::from_raw_parts(device_pros.gcnArchName.as_ptr() as _, null_terminator + 1) - }; - let device_name = - if let Ok(Ok(name)) = CStr::from_bytes_with_nul(gcn_arch_slice).map(|x| x.to_str()) { - name - } else { - return Err(io::Error::new(io::ErrorKind::Other, "")); - }; - let dir = tempfile::tempdir()?; - let llvm_spirv_path = match env::var("LLVM_SPIRV") { - Ok(path) => Cow::Owned(path), - Err(_) => Cow::Borrowed(LLVM_SPIRV), - }; - let llvm_files = spirv_il - .map(|spirv| { - let mut spirv_file = NamedTempFile::new_in(&dir)?; - let spirv_u8 = unsafe { - slice::from_raw_parts( - spirv.as_ptr() as *const u8, - spirv.len() * mem::size_of::(), - ) - }; - spirv_file.write_all(spirv_u8)?; - if cfg!(debug_assertions) { - persist_file(spirv_file.path())?; - } - let llvm = NamedTempFile::new_in(&dir)?; - let to_llvm_cmd = Command::new(&*llvm_spirv_path) - //.arg("--spirv-debug") - .arg("-r") - .arg("-o") - .arg(llvm.path()) - .arg(spirv_file.path()) - .status()?; - assert!(to_llvm_cmd.success()); - if cfg!(debug_assertions) { - persist_file(llvm.path())?; - } - Ok::<_, io::Error>(llvm) - }) - .collect::, _>>()?; - let linked_binary = NamedTempFile::new_in(&dir)?; - let mut llvm_link = PathBuf::from(AMDGPU); - llvm_link.push("llvm"); - llvm_link.push("bin"); - llvm_link.push("llvm-link"); - let mut linker_cmd = Command::new(&llvm_link); - linker_cmd - .arg("-o") - .arg(linked_binary.path()) - .args(llvm_files.iter().map(|f| f.path())) - .args(get_bitcode_paths(device_name)); - if cfg!(debug_assertions) { - linker_cmd.arg("-v"); - } - let status = linker_cmd.status()?; - assert!(status.success()); - if cfg!(debug_assertions) { - persist_file(linked_binary.path())?; - } - let mut ptx_lib_bitcode = NamedTempFile::new_in(&dir)?; - let compiled_binary = NamedTempFile::new_in(&dir)?; - let mut clang_exe = PathBuf::from(AMDGPU); - clang_exe.push("llvm"); - clang_exe.push("bin"); - clang_exe.push("clang"); - let mut compiler_cmd = Command::new(&clang_exe); - compiler_cmd - .arg(format!("-mcpu={}", 
device_name)) - .arg("-ffp-contract=off") - .arg("-nogpulib") - .arg("-mno-wavefrontsize64") - .arg("-O3") - .arg("-Xclang") - .arg("-O3") - .arg("-Xlinker") - .arg("--no-undefined") - .arg("-target") - .arg(AMDGPU_TARGET) - .arg("-o") - .arg(compiled_binary.path()) - .arg("-x") - .arg("ir") - .arg(linked_binary.path()); - if let Some((_, bitcode)) = ptx_lib { - ptx_lib_bitcode.write_all(bitcode)?; - compiler_cmd.arg(ptx_lib_bitcode.path()); - }; - if cfg!(debug_assertions) { - compiler_cmd.arg("-v"); - } - let status = compiler_cmd.status()?; - assert!(status.success()); - let mut result = Vec::new(); - let compiled_bin_path = compiled_binary.path(); - let mut compiled_binary = File::open(compiled_bin_path)?; - compiled_binary.read_to_end(&mut result)?; - if cfg!(debug_assertions) { - persist_file(compiled_bin_path)?; - } - Ok(result) -} - -fn persist_file(path: &Path) -> io::Result<()> { - let mut persistent = PathBuf::from("/tmp/zluda"); - std::fs::create_dir_all(&persistent)?; - persistent.push(path.file_name().unwrap()); - std::fs::copy(path, persistent)?; - Ok(()) +pub(crate) fn unload(hmod: CUmodule) -> CUresult { + super::drop_checked::(hmod) } -fn get_bitcode_paths(device_name: &str) -> impl Iterator { - let generic_paths = AMDGPU_BITCODE.iter().map(|x| { - let mut path = PathBuf::from(AMDGPU); - path.push("amdgcn"); - path.push("bitcode"); - path.push(x); - path - }); - let suffix = if let Some(suffix_idx) = device_name.find(':') { - suffix_idx - } else { - device_name.len() - }; - let mut additional_path = PathBuf::from(AMDGPU); - additional_path.push("amdgcn"); - additional_path.push("bitcode"); - additional_path.push(format!( - "{}{}{}", - AMDGPU_BITCODE_DEVICE_PREFIX, - &device_name[3..suffix], - ".bc" - )); - generic_paths.chain(std::iter::once(additional_path)) +pub(crate) fn get_function( + hfunc: &mut hipFunction_t, + hmod: &Module, + name: *const ::core::ffi::c_char, +) -> hipError_t { + unsafe { hipModuleGetFunction(hfunc, hmod.base, name) } } diff --git a/zluda/src/impl/pointer.rs b/zluda/src/impl/pointer.rs index 2b925cd2..6b458a0f 100644 --- a/zluda/src/impl/pointer.rs +++ b/zluda/src/impl/pointer.rs @@ -1,53 +1,40 @@ -use std::{ffi::c_void, mem, ptr}; - -use hip_runtime_sys::{hipError_t, hipMemoryType, hipPointerGetAttributes}; - -use crate::{ - cuda::{CUdeviceptr, CUmemorytype, CUpointer_attribute}, - hip_call, -}; +use cuda_types::*; +use hip_runtime_sys::*; +use std::{ffi::c_void, ptr}; pub(crate) unsafe fn get_attribute( data: *mut c_void, - attribute: CUpointer_attribute, - ptr: CUdeviceptr, -) -> Result<(), hipError_t> { + attribute: hipPointer_attribute, + ptr: hipDeviceptr_t, +) -> hipError_t { if data == ptr::null_mut() { - return Err(hipError_t::hipErrorInvalidValue); + return hipError_t::ErrorInvalidValue; } - let mut attribs = mem::zeroed(); - hip_call! 
{ hipPointerGetAttributes(&mut attribs, ptr.0 as _) }; match attribute { - CUpointer_attribute::CU_POINTER_ATTRIBUTE_CONTEXT => { - *(data as *mut _) = attribs.device; - Ok(()) - } - CUpointer_attribute::CU_POINTER_ATTRIBUTE_MEMORY_TYPE => { - *(data as *mut _) = memory_type(attribs.memoryType)?; - Ok(()) - } - CUpointer_attribute::CU_POINTER_ATTRIBUTE_DEVICE_POINTER => { - *(data as *mut _) = attribs.devicePointer; - Ok(()) - } - CUpointer_attribute::CU_POINTER_ATTRIBUTE_HOST_POINTER => { - *(data as *mut _) = attribs.hostPointer; - Ok(()) - } - CUpointer_attribute::CU_POINTER_ATTRIBUTE_IS_MANAGED => { - *(data as *mut _) = attribs.isManaged; + // TODO: implement by getting device ordinal & allocation start, + // then go through every context for that device + hipPointer_attribute::HIP_POINTER_ATTRIBUTE_CONTEXT => hipError_t::ErrorNotSupported, + hipPointer_attribute::HIP_POINTER_ATTRIBUTE_MEMORY_TYPE => { + let mut hip_result = hipMemoryType(0); + hipPointerGetAttribute( + (&mut hip_result as *mut hipMemoryType).cast::(), + attribute, + ptr, + )?; + let cuda_result = memory_type(hip_result)?; + unsafe { *(data.cast()) = cuda_result }; Ok(()) } - _ => Err(hipError_t::hipErrorNotSupported), + _ => unsafe { hipPointerGetAttribute(data, attribute, ptr) }, } } -pub(crate) fn memory_type(cu: hipMemoryType) -> Result { +fn memory_type(cu: hipMemoryType) -> Result { match cu { hipMemoryType::hipMemoryTypeHost => Ok(CUmemorytype::CU_MEMORYTYPE_HOST), hipMemoryType::hipMemoryTypeDevice => Ok(CUmemorytype::CU_MEMORYTYPE_DEVICE), hipMemoryType::hipMemoryTypeArray => Ok(CUmemorytype::CU_MEMORYTYPE_ARRAY), hipMemoryType::hipMemoryTypeUnified => Ok(CUmemorytype::CU_MEMORYTYPE_UNIFIED), - _ => Err(hipError_t::hipErrorInvalidValue), + _ => Err(hipErrorCode_t::InvalidValue), } } diff --git a/zluda/src/impl/test.rs b/zluda/src/impl/test.rs deleted file mode 100644 index b36ccd8d..00000000 --- a/zluda/src/impl/test.rs +++ /dev/null @@ -1,157 +0,0 @@ -#![allow(non_snake_case)] - -use crate::cuda as zluda; -use crate::cuda::CUstream; -use crate::cuda::CUuuid; -use crate::{ - cuda::{CUdevice, CUdeviceptr}, - r#impl::CUresult, -}; -use ::std::{ - ffi::c_void, - os::raw::{c_int, c_uint}, -}; -use cuda_driver_sys as cuda; - -#[macro_export] -macro_rules! cuda_driver_test { - ($func:ident) => { - paste! 
{ - #[test] - fn [<$func _zluda>]() { - $func::() - } - - #[test] - fn [<$func _cuda>]() { - $func::() - } - } - }; -} - -pub trait CudaDriverFns { - fn cuInit(flags: c_uint) -> CUresult; - fn cuCtxCreate_v2(pctx: *mut *mut c_void, flags: c_uint, dev: c_int) -> CUresult; - fn cuCtxDestroy_v2(ctx: *mut c_void) -> CUresult; - fn cuCtxPopCurrent_v2(pctx: *mut *mut c_void) -> CUresult; - fn cuCtxGetApiVersion(ctx: *mut c_void, version: *mut c_uint) -> CUresult; - fn cuCtxGetCurrent(pctx: *mut *mut c_void) -> CUresult; - fn cuMemAlloc_v2(dptr: *mut *mut c_void, bytesize: usize) -> CUresult; - fn cuDeviceGetUuid(uuid: *mut CUuuid, dev: c_int) -> CUresult; - fn cuDevicePrimaryCtxGetState(dev: c_int, flags: *mut c_uint, active: *mut c_int) -> CUresult; - fn cuStreamGetCtx(hStream: CUstream, pctx: *mut *mut c_void) -> CUresult; - fn cuStreamCreate(stream: *mut CUstream, flags: c_uint) -> CUresult; - fn cuMemFree_v2(mem: *mut c_void) -> CUresult; - fn cuStreamDestroy_v2(stream: CUstream) -> CUresult; -} - -pub struct Zluda(); - -impl CudaDriverFns for Zluda { - fn cuInit(_flags: c_uint) -> CUresult { - zluda::cuInit(_flags as _) - } - - fn cuCtxCreate_v2(pctx: *mut *mut c_void, flags: c_uint, dev: c_int) -> CUresult { - zluda::cuCtxCreate_v2(pctx as *mut _, flags, CUdevice(dev)) - } - - fn cuCtxDestroy_v2(ctx: *mut c_void) -> CUresult { - zluda::cuCtxDestroy_v2(ctx as *mut _) - } - - fn cuCtxPopCurrent_v2(pctx: *mut *mut c_void) -> CUresult { - zluda::cuCtxPopCurrent_v2(pctx as *mut _) - } - - fn cuCtxGetApiVersion(ctx: *mut c_void, version: *mut c_uint) -> CUresult { - zluda::cuCtxGetApiVersion(ctx as *mut _, version) - } - - fn cuCtxGetCurrent(pctx: *mut *mut c_void) -> CUresult { - zluda::cuCtxGetCurrent(pctx as *mut _) - } - fn cuMemAlloc_v2(dptr: *mut *mut c_void, bytesize: usize) -> CUresult { - zluda::cuMemAlloc_v2(dptr as *mut _, bytesize) - } - - fn cuDeviceGetUuid(uuid: *mut CUuuid, dev: c_int) -> CUresult { - zluda::cuDeviceGetUuid(uuid, CUdevice(dev)) - } - - fn cuDevicePrimaryCtxGetState(dev: c_int, flags: *mut c_uint, active: *mut c_int) -> CUresult { - zluda::cuDevicePrimaryCtxGetState(CUdevice(dev), flags, active) - } - - fn cuStreamGetCtx(hStream: CUstream, pctx: *mut *mut c_void) -> CUresult { - zluda::cuStreamGetCtx(hStream, pctx as _) - } - - fn cuStreamCreate(stream: *mut CUstream, flags: c_uint) -> CUresult { - zluda::cuStreamCreate(stream, flags) - } - - fn cuMemFree_v2(dptr: *mut c_void) -> CUresult { - zluda::cuMemFree_v2(CUdeviceptr(dptr as _)) - } - - fn cuStreamDestroy_v2(stream: CUstream) -> CUresult { - zluda::cuStreamDestroy_v2(stream) - } -} - -pub struct Cuda(); - -impl CudaDriverFns for Cuda { - fn cuInit(flags: c_uint) -> CUresult { - unsafe { CUresult(cuda::cuInit(flags) as c_uint) } - } - - fn cuCtxCreate_v2(pctx: *mut *mut c_void, flags: c_uint, dev: c_int) -> CUresult { - unsafe { CUresult(cuda::cuCtxCreate_v2(pctx as *mut _, flags, dev) as c_uint) } - } - - fn cuCtxDestroy_v2(ctx: *mut c_void) -> CUresult { - unsafe { CUresult(cuda::cuCtxDestroy_v2(ctx as *mut _) as c_uint) } - } - - fn cuCtxPopCurrent_v2(pctx: *mut *mut c_void) -> CUresult { - unsafe { CUresult(cuda::cuCtxPopCurrent_v2(pctx as *mut _) as c_uint) } - } - - fn cuCtxGetApiVersion(ctx: *mut c_void, version: *mut c_uint) -> CUresult { - unsafe { CUresult(cuda::cuCtxGetApiVersion(ctx as *mut _, version) as c_uint) } - } - - fn cuCtxGetCurrent(pctx: *mut *mut c_void) -> CUresult { - unsafe { CUresult(cuda::cuCtxGetCurrent(pctx as *mut _) as c_uint) } - } - fn cuMemAlloc_v2(dptr: *mut *mut c_void, 
bytesize: usize) -> CUresult { - unsafe { CUresult(cuda::cuMemAlloc_v2(dptr as *mut _, bytesize) as c_uint) } - } - - fn cuDeviceGetUuid(uuid: *mut CUuuid, dev: c_int) -> CUresult { - unsafe { CUresult(cuda::cuDeviceGetUuid(uuid as *mut _, dev) as c_uint) } - } - - fn cuDevicePrimaryCtxGetState(dev: c_int, flags: *mut c_uint, active: *mut c_int) -> CUresult { - unsafe { CUresult(cuda::cuDevicePrimaryCtxGetState(dev, flags, active) as c_uint) } - } - - fn cuStreamGetCtx(hStream: CUstream, pctx: *mut *mut c_void) -> CUresult { - unsafe { CUresult(cuda::cuStreamGetCtx(hStream as _, pctx as _) as c_uint) } - } - - fn cuStreamCreate(stream: *mut CUstream, flags: c_uint) -> CUresult { - unsafe { CUresult(cuda::cuStreamCreate(stream as _, flags as _) as c_uint) } - } - - fn cuMemFree_v2(mem: *mut c_void) -> CUresult { - unsafe { CUresult(cuda::cuMemFree_v2(mem as _) as c_uint) } - } - - fn cuStreamDestroy_v2(stream: CUstream) -> CUresult { - unsafe { CUresult(cuda::cuStreamDestroy_v2(stream as _) as c_uint) } - } -} diff --git a/zluda/src/lib.rs b/zluda/src/lib.rs index 72ca51c0..1568f47a 100644 --- a/zluda/src/lib.rs +++ b/zluda/src/lib.rs @@ -1,13 +1,79 @@ -#[macro_use] -extern crate lazy_static; -#[cfg(test)] -extern crate cuda_driver_sys; -#[cfg(test)] -#[macro_use] -extern crate paste; -extern crate ptx; - -#[allow(warnings)] -pub mod cuda; -mod cuda_impl; pub(crate) mod r#impl; + +macro_rules! unimplemented { + ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => { + $( + #[cfg_attr(not(test), no_mangle)] + #[allow(improper_ctypes)] + #[allow(improper_ctypes_definitions)] + pub unsafe extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type { + crate::r#impl::unimplemented() + } + )* + }; +} + +macro_rules! implemented { + ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => { + $( + #[cfg_attr(not(test), no_mangle)] + #[allow(improper_ctypes)] + #[allow(improper_ctypes_definitions)] + pub unsafe extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type { + cuda_base::cuda_normalize_fn!( crate::r#impl::$fn_name ) ($(crate::r#impl::FromCuda::from_cuda(&$arg_id)?),*)?; + Ok(()) + } + )* + }; +} + +macro_rules! 
implemented_in_function { + ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => { + $( + #[cfg_attr(not(test), no_mangle)] + #[allow(improper_ctypes)] + #[allow(improper_ctypes_definitions)] + pub unsafe extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type { + cuda_base::cuda_normalize_fn!( crate::r#impl::function::$fn_name ) ($(crate::r#impl::FromCuda::from_cuda(&$arg_id)?),*)?; + Ok(()) + } + )* + }; +} + +cuda_base::cuda_function_declarations!( + unimplemented, + implemented <= [ + cuCtxGetLimit, + cuCtxSetCurrent, + cuCtxSetLimit, + cuCtxSynchronize, + cuDeviceComputeCapability, + cuDeviceGet, + cuDeviceGetAttribute, + cuDeviceGetCount, + cuDeviceGetLuid, + cuDeviceGetName, + cuDevicePrimaryCtxRelease, + cuDevicePrimaryCtxRetain, + cuDeviceGetProperties, + cuDeviceGetUuid, + cuDeviceGetUuid_v2, + cuDeviceTotalMem_v2, + cuDriverGetVersion, + cuFuncGetAttribute, + cuInit, + cuMemAlloc_v2, + cuMemFree_v2, + cuMemcpyDtoH_v2, + cuMemcpyHtoD_v2, + cuModuleGetFunction, + cuModuleLoadData, + cuModuleUnload, + cuPointerGetAttribute, + cuMemGetAddressRange_v2, + ], + implemented_in_function <= [ + cuLaunchKernel, + ] +); \ No newline at end of file diff --git a/zluda_bindgen/Cargo.toml b/zluda_bindgen/Cargo.toml new file mode 100644 index 00000000..791ad2cb --- /dev/null +++ b/zluda_bindgen/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "zluda_bindgen" +version = "0.1.0" +edition = "2021" + +[dependencies] +bindgen = "0.70" +syn = { version = "2.0", features = ["full", "visit-mut"] } +proc-macro2 = "1.0.89" +quote = "1.0" +prettyplease = "0.2.25" +rustc-hash = "1.1.0" diff --git a/zluda_bindgen/build/cuda_wrapper.h b/zluda_bindgen/build/cuda_wrapper.h new file mode 100644 index 00000000..a5502560 --- /dev/null +++ b/zluda_bindgen/build/cuda_wrapper.h @@ -0,0 +1,7 @@ +#define __CUDA_API_VERSION_INTERNAL +#include +#include +#include +#include +#include +#include diff --git a/zluda_bindgen/src/main.rs b/zluda_bindgen/src/main.rs new file mode 100644 index 00000000..7332254a --- /dev/null +++ b/zluda_bindgen/src/main.rs @@ -0,0 +1,703 @@ +use proc_macro2::Span; +use quote::{format_ident, quote, ToTokens}; +use rustc_hash::{FxHashMap, FxHashSet}; +use std::{collections::hash_map, fs::File, io::Write, iter, path::PathBuf, str::FromStr}; +use syn::{ + parse_quote, punctuated::Punctuated, visit_mut::VisitMut, Abi, Fields, FieldsUnnamed, FnArg, + ForeignItem, ForeignItemFn, Ident, Item, ItemConst, ItemForeignMod, ItemUse, LitStr, Path, + PathArguments, Signature, Type, TypePath, UseTree, +}; + +fn main() { + let crate_root = PathBuf::from_str(env!("CARGO_MANIFEST_DIR")).unwrap(); + generate_hip_runtime( + &crate_root, + &["..", "ext", "hip_runtime-sys", "src", "lib.rs"], + ); + let cuda_header = bindgen::Builder::default() + .use_core() + .rust_target(bindgen::RustTarget::Stable_1_77) + .layout_tests(false) + .default_enum_style(bindgen::EnumVariation::NewType { + is_bitfield: false, + is_global: false, + }) + .derive_hash(true) + .derive_eq(true) + .header_contents("cuda_wrapper.h", include_str!("../build/cuda_wrapper.h")) + .allowlist_type("^CU.*") + .allowlist_function("^cu.*") + .allowlist_var("^CU.*") + .must_use_type("cudaError_enum") + .constified_enum("cudaError_enum") + .no_partialeq("CUDA_HOST_NODE_PARAMS_st") + .new_type_alias(r"^CUdeviceptr_v\d+$") + .new_type_alias(r"^CUcontext$") + .new_type_alias(r"^CUstream$") + .new_type_alias(r"^CUmodule$") + .new_type_alias(r"^CUfunction$") + .new_type_alias(r"^CUlibrary$") + 
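+        // clang needs to see the CUDA driver headers to generate these bindings; the include path below assumes the default /usr/local/cuda install location and may need adjusting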
.clang_args(["-I/usr/local/cuda/include"]) + .generate() + .unwrap() + .to_string(); + let module: syn::File = syn::parse_str(&cuda_header).unwrap(); + generate_functions(&crate_root, &["..", "cuda_base", "src", "cuda.rs"], &module); + generate_types(&crate_root, &["..", "cuda_types", "src", "lib.rs"], &module); + generate_display( + &crate_root, + &["..", "zluda_dump", "src", "format_generated.rs"], + "cuda_types", + &module, + ) +} + +fn generate_hip_runtime(output: &PathBuf, path: &[&str]) { + let hiprt_header = bindgen::Builder::default() + .use_core() + .rust_target(bindgen::RustTarget::Stable_1_77) + .layout_tests(false) + .default_enum_style(bindgen::EnumVariation::NewType { + is_bitfield: false, + is_global: false, + }) + .derive_hash(true) + .derive_eq(true) + .header("/opt/rocm/include/hip/hip_runtime_api.h") + .allowlist_type("^hip.*") + .allowlist_function("^hip.*") + .allowlist_var("^hip.*") + .must_use_type("hipError_t") + .constified_enum("hipError_t") + .new_type_alias("^hipDeviceptr_t$") + .new_type_alias("^hipStream_t$") + .new_type_alias("^hipModule_t$") + .new_type_alias("^hipFunction_t$") + .clang_args(["-I/opt/rocm/include", "-D__HIP_PLATFORM_AMD__"]) + .generate() + .unwrap() + .to_string(); + let mut module: syn::File = syn::parse_str(&hiprt_header).unwrap(); + let mut converter = ConvertIntoRustResult { + type_: "hipError_t", + underlying_type: "hipError_t", + new_error_type: "hipErrorCode_t", + error_prefix: ("hipError", "Error"), + success: ("hipSuccess", "Success"), + constants: Vec::new(), + }; + module.items = module + .items + .into_iter() + .filter_map(|item| match item { + Item::Const(const_) => converter.get_const(const_).map(Item::Const), + Item::Use(use_) => converter.get_use(use_).map(Item::Use), + Item::Type(type_) => converter.get_type(type_).map(Item::Type), + item => Some(item), + }) + .collect::>(); + converter.flush(&mut module.items); + add_send_sync( + &mut module.items, + &[ + "hipDeviceptr_t", + "hipStream_t", + "hipModule_t", + "hipFunction_t", + ], + ); + let mut output = output.clone(); + output.extend(path); + write_rust_to_file(output, &prettyplease::unparse(&module)) +} + +fn add_send_sync(items: &mut Vec, arg: &[&str]) { + for type_ in arg { + let type_ = Ident::new(type_, Span::call_site()); + items.extend([ + parse_quote! { + unsafe impl Send for #type_ {} + }, + parse_quote! { + unsafe impl Sync for #type_ {} + }, + ]); + } +} + +fn generate_functions(output: &PathBuf, path: &[&str], module: &syn::File) { + let fns_ = module.items.iter().filter_map(|item| match item { + Item::ForeignMod(extern_) => match &*extern_.items { + [ForeignItem::Fn(fn_)] => Some(fn_), + _ => unreachable!(), + }, + _ => None, + }); + let mut module: syn::File = parse_quote! 
{ + extern "system" { + #(#fns_)* + } + }; + syn::visit_mut::visit_file_mut(&mut PrependCudaPath, &mut module); + syn::visit_mut::visit_file_mut(&mut RemoveVisibility, &mut module); + syn::visit_mut::visit_file_mut(&mut ExplicitReturnType, &mut module); + let mut output = output.clone(); + output.extend(path); + write_rust_to_file(output, &prettyplease::unparse(&module)) +} + +fn generate_types(output: &PathBuf, path: &[&str], module: &syn::File) { + let mut module = module.clone(); + let mut converter = ConvertIntoRustResult { + type_: "CUresult", + underlying_type: "cudaError_enum", + new_error_type: "CUerror", + error_prefix: ("CUDA_ERROR_", "ERROR_"), + success: ("CUDA_SUCCESS", "SUCCESS"), + constants: Vec::new(), + }; + module.items = module + .items + .into_iter() + .filter_map(|item| match item { + Item::ForeignMod(_) => None, + Item::Const(const_) => converter.get_const(const_).map(Item::Const), + Item::Use(use_) => converter.get_use(use_).map(Item::Use), + Item::Type(type_) => converter.get_type(type_).map(Item::Type), + Item::Struct(mut struct_) => { + let ident_string = struct_.ident.to_string(); + match &*ident_string { + "CUdeviceptr_v2" => { + struct_.fields = Fields::Unnamed(parse_quote! { + (pub *mut ::core::ffi::c_void) + }); + } + "CUuuid_st" => { + struct_.fields = Fields::Named(parse_quote! { + {pub bytes: [::core::ffi::c_uchar; 16usize]} + }); + } + _ => {} + } + Some(Item::Struct(struct_)) + } + item => Some(item), + }) + .collect::>(); + converter.flush(&mut module.items); + module.items.push(parse_quote! { + impl From for CUerror { + fn from(error: hip_runtime_sys::hipErrorCode_t) -> Self { + Self(error.0) + } + } + }); + add_send_sync( + &mut module.items, + &[ + "CUdeviceptr", + "CUcontext", + "CUstream", + "CUmodule", + "CUfunction", + "CUlibrary", + ], + ); + syn::visit_mut::visit_file_mut(&mut FixAbi, &mut module); + let mut output = output.clone(); + output.extend(path); + write_rust_to_file(output, &prettyplease::unparse(&module)) +} + +fn write_rust_to_file(path: impl AsRef, content: &str) { + let mut file = File::create(path).unwrap(); + file.write("// Generated automatically by zluda_bindgen\n// DO NOT EDIT MANUALLY\n#![allow(warnings)]\n".as_bytes()) + .unwrap(); + file.write(content.as_bytes()).unwrap(); +} + +struct ConvertIntoRustResult { + type_: &'static str, + underlying_type: &'static str, + new_error_type: &'static str, + error_prefix: (&'static str, &'static str), + success: (&'static str, &'static str), + constants: Vec, +} + +impl ConvertIntoRustResult { + fn get_const(&mut self, const_: syn::ItemConst) -> Option { + let name = const_.ident.to_string(); + if name.starts_with(self.underlying_type) { + self.constants.push(const_); + None + } else { + Some(const_) + } + } + + fn get_use(&mut self, use_: ItemUse) -> Option { + if let UseTree::Path(ref path) = use_.tree { + if let UseTree::Rename(ref rename) = &*path.tree { + if rename.rename == self.type_ { + return None; + } + } + } + Some(use_) + } + + fn flush(self, items: &mut Vec) { + let type_ = format_ident!("{}", self.type_); + let type_trait = format_ident!("{}Consts", self.type_); + let new_error_type = format_ident!("{}", self.new_error_type); + let success = format_ident!("{}", self.success.1); + let mut result_variants = Vec::new(); + let mut error_variants = Vec::new(); + for const_ in self.constants.iter() { + let ident = const_.ident.to_string(); + if ident.ends_with(self.success.0) { + result_variants.push(quote! 
{ + const #success: #type_ = #type_::Ok(()); + }); + } else { + let old_prefix_len = self.underlying_type.len() + 1 + self.error_prefix.0.len(); + let variant_ident = + format_ident!("{}{}", self.error_prefix.1, &ident[old_prefix_len..]); + let error_ident = format_ident!("{}", &ident[old_prefix_len..]); + let expr = &const_.expr; + result_variants.push(quote! { + const #variant_ident: #type_ = #type_::Err(#new_error_type::#error_ident); + }); + error_variants.push(quote! { + pub const #error_ident: #new_error_type = #new_error_type(unsafe { ::core::num::NonZeroU32::new_unchecked(#expr) }); + }); + } + } + let extra_items: Punctuated = parse_quote! { + impl #new_error_type { + #(#error_variants)* + } + #[repr(transparent)] + #[derive(Debug, Hash, Copy, Clone, PartialEq, Eq)] + pub struct #new_error_type(pub ::core::num::NonZeroU32); + + pub trait #type_trait { + #(#result_variants)* + } + impl #type_trait for #type_ {} + #[must_use] + pub type #type_ = ::core::result::Result<(), #new_error_type>; + const _: fn() = || { + let _ = std::mem::transmute::<#type_, u32>; + }; + }; + items.extend(extra_items); + } + + fn get_type(&self, type_: syn::ItemType) -> Option { + if type_.ident.to_string() == self.type_ { + None + } else { + Some(type_) + } + } +} + +struct FixAbi; + +impl VisitMut for FixAbi { + fn visit_abi_mut(&mut self, i: &mut Abi) { + if let Some(ref mut name) = i.name { + *name = LitStr::new("system", Span::call_site()); + } + } +} + +struct PrependCudaPath; + +impl VisitMut for PrependCudaPath { + fn visit_type_path_mut(&mut self, type_: &mut TypePath) { + if type_.path.segments.len() == 1 { + match &*type_.path.segments[0].ident.to_string() { + "usize" | "f64" | "f32" => {} + _ => { + *type_ = parse_quote! { cuda_types :: #type_ }; + } + } + } + } +} + +struct RemoveVisibility; + +impl VisitMut for RemoveVisibility { + fn visit_visibility_mut(&mut self, i: &mut syn::Visibility) { + *i = syn::Visibility::Inherited; + } +} + +struct ExplicitReturnType; + +impl VisitMut for ExplicitReturnType { + fn visit_return_type_mut(&mut self, i: &mut syn::ReturnType) { + if let syn::ReturnType::Default = i { + *i = parse_quote! 
{ -> {} }; + } + } +} + +fn generate_display( + output: &PathBuf, + path: &[&str], + types_crate: &'static str, + module: &syn::File, +) { + let ignore_types = [ + "CUdevice", + "CUdeviceptr_v1", + "CUarrayMapInfo_st", + "CUDA_RESOURCE_DESC_st", + "CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st", + "CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st", + "CUexecAffinityParam_st", + "CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st", + "CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st", + "CUuuid_st", + "HGPUNV", + "EGLint", + "EGLSyncKHR", + "EGLImageKHR", + "EGLStreamKHR", + "CUasyncNotificationInfo_st", + "CUgraphNodeParams_st", + "CUeglFrame_st", + "CUdevResource_st", + "CUlaunchAttribute_st", + "CUlaunchConfig_st", + ]; + let ignore_functions = [ + "cuGLGetDevices", + "cuGLGetDevices_v2", + "cuStreamSetAttribute", + "cuStreamSetAttribute_ptsz", + "cuStreamGetAttribute", + "cuStreamGetAttribute_ptsz", + "cuGraphKernelNodeGetAttribute", + "cuGraphKernelNodeSetAttribute", + ]; + let count_selectors = [ + ("cuCtxCreate_v3", 1, 2), + ("cuMemMapArrayAsync", 0, 1), + ("cuMemMapArrayAsync_ptsz", 0, 1), + ("cuStreamBatchMemOp", 2, 1), + ("cuStreamBatchMemOp_ptsz", 2, 1), + ("cuStreamBatchMemOp_v2", 2, 1), + ]; + let mut derive_state = DeriveDisplayState::new( + &ignore_types, + types_crate, + &ignore_functions, + &count_selectors, + ); + let mut items = module + .items + .iter() + .filter_map(|i| cuda_derive_display_trait_for_item(&mut derive_state, i)) + .collect::>(); + items.push(curesult_display_trait(&derive_state)); + let mut output = output.clone(); + output.extend(path); + write_rust_to_file( + output, + &prettyplease::unparse(&syn::File { + shebang: None, + attrs: Vec::new(), + items, + }), + ); +} + +struct DeriveDisplayState<'a> { + types_crate: &'static str, + ignore_types: FxHashSet, + ignore_fns: FxHashSet, + enums: FxHashMap<&'a Ident, Vec<&'a Ident>>, + array_arguments: FxHashMap<(Ident, usize), usize>, + result_variants: Vec<&'a ItemConst>, +} + +impl<'a> DeriveDisplayState<'a> { + fn new( + ignore_types: &[&'static str], + types_crate: &'static str, + ignore_fns: &[&'static str], + count_selectors: &[(&'static str, usize, usize)], + ) -> Self { + DeriveDisplayState { + types_crate, + ignore_types: ignore_types + .into_iter() + .map(|x| Ident::new(x, Span::call_site())) + .collect(), + ignore_fns: ignore_fns + .into_iter() + .map(|x| Ident::new(x, Span::call_site())) + .collect(), + array_arguments: count_selectors + .into_iter() + .map(|(name, val, count)| ((Ident::new(name, Span::call_site()), *val), *count)) + .collect(), + enums: Default::default(), + result_variants: Vec::new(), + } + } + + fn record_enum_variant(&mut self, enum_: &'a Ident, variant: &'a Ident) { + match self.enums.entry(enum_) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(variant); + } + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![variant]); + } + } + } +} + +fn cuda_derive_display_trait_for_item<'a>( + state: &mut DeriveDisplayState<'a>, + item: &'a Item, +) -> Option { + let path_prefix = Path::from(Ident::new(state.types_crate, Span::call_site())); + let path_prefix_iter = iter::repeat(&path_prefix); + match item { + Item::Const(const_) => { + if const_.ty.to_token_stream().to_string() == "cudaError_enum" { + state.result_variants.push(const_); + } + None + } + Item::ForeignMod(ItemForeignMod { items, .. }) => match items.last().unwrap() { + ForeignItem::Fn(ForeignItemFn { + sig: Signature { ident, inputs, .. }, + .. 
+ }) => { + if state.ignore_fns.contains(ident) { + return None; + } + let inputs = inputs + .iter() + .map(|fn_arg| { + let mut fn_arg = fn_arg.clone(); + syn::visit_mut::visit_fn_arg_mut(&mut PrependCudaPath, &mut fn_arg); + fn_arg + }) + .collect::>(); + let inputs_iter = inputs.iter(); + let original_fn_name = ident.to_string(); + let mut write_argument = inputs.iter().enumerate().map(|(index, fn_arg)| { + let name = fn_arg_name(fn_arg); + if let Some(length_index) = state.array_arguments.get(&(ident.clone(), index)) { + let length = fn_arg_name(&inputs[*length_index]); + quote! { + writer.write_all(concat!(stringify!(#name), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..#length { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write(unsafe { &*#name.add(i as usize) }, #original_fn_name, arg_idx, writer)?; + } + writer.write_all(b"]")?; + } + } else { + quote! { + writer.write_all(concat!(stringify!(#name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&#name, #original_fn_name, arg_idx, writer)?; + } + } + }); + let fn_name = format_ident!("write_{}", ident); + Some(match write_argument.next() { + Some(first_write_argument) => parse_quote! { + pub fn #fn_name(writer: &mut (impl std::io::Write + ?Sized), #(#inputs_iter,)*) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + #first_write_argument + #( + arg_idx += 1; + writer.write_all(b", ")?; + #write_argument + )* + writer.write_all(b")") + } + }, + None => parse_quote! { + pub fn #fn_name(writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + writer.write_all(b"()") + } + }, + }) + } + _ => unreachable!(), + }, + Item::Impl(ref item_impl) => { + let enum_ = match &*item_impl.self_ty { + Type::Path(ref path) => &path.path.segments.last().unwrap().ident, + _ => unreachable!(), + }; + let variant_ = match item_impl.items.last().unwrap() { + syn::ImplItem::Const(item_const) => &item_const.ident, + _ => unreachable!(), + }; + state.record_enum_variant(enum_, variant_); + None + } + Item::Struct(item_struct) => { + if state.ignore_types.contains(&item_struct.ident) { + return None; + } + if state.enums.contains_key(&item_struct.ident) { + let enum_ = &item_struct.ident; + let enum_iter = iter::repeat(&item_struct.ident); + let variants = state.enums.get(&item_struct.ident).unwrap().iter(); + Some(parse_quote! { + impl crate::format::CudaDisplay for #path_prefix :: #enum_ { + fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + match self { + #(& #path_prefix_iter :: #enum_iter :: #variants => writer.write_all(stringify!(#variants).as_bytes()),)* + _ => write!(writer, "{}", self.0) + } + } + } + }) + } else { + let struct_ = &item_struct.ident; + match item_struct.fields { + Fields::Named(ref fields) => { + let mut rest_of_fields = fields.named.iter().filter_map(|f| { + let f_ident = f.ident.as_ref().unwrap(); + let name = f_ident.to_string(); + if name.starts_with("reserved") || name == "_unused" { + None + } else { + Some(f_ident) + } + }); + let first_field = match rest_of_fields.next() { + Some(f) => f, + None => return None, + }; + Some(parse_quote! 
{ + impl crate::format::CudaDisplay for #path_prefix :: #struct_ { + fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(#first_field), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.#first_field, "", 0, writer)?; + #( + writer.write_all(concat!(", ", stringify!(#rest_of_fields), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.#rest_of_fields, "", 0, writer)?; + )* + writer.write_all(b" }") + } + } + }) + } + Fields::Unnamed(FieldsUnnamed { ref unnamed, .. }) if unnamed.len() == 1 => { + Some(parse_quote! { + impl crate::format::CudaDisplay for #path_prefix :: #struct_ { + fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + write!(writer, "{:p}", self.0) + } + } + }) + } + _ => return None, + } + } + } + Item::Type(item_type) => { + if state.ignore_types.contains(&item_type.ident) { + return None; + }; + match &*item_type.ty { + Type::Ptr(_) => { + let type_ = &item_type.ident; + Some(parse_quote! { + impl crate::format::CudaDisplay for #path_prefix :: #type_ { + fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } + } + }) + } + Type::Path(type_path) => { + if type_path.path.leading_colon.is_some() { + let option_seg = type_path.path.segments.last().unwrap(); + if option_seg.ident == "Option" { + match &option_seg.arguments { + PathArguments::AngleBracketed(generic) => match generic.args[0] { + syn::GenericArgument::Type(Type::BareFn(_)) => { + let type_ = &item_type.ident; + return Some(parse_quote! { + impl crate::format::CudaDisplay for #path_prefix :: #type_ { + fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + write!(writer, "{:p}", unsafe { std::mem::transmute::<#path_prefix :: #type_, *mut ::std::ffi::c_void>(*self) }) + } + } + }); + } + _ => unreachable!(), + }, + _ => unreachable!(), + } + } + } + None + } + _ => unreachable!(), + } + } + Item::Union(_) => None, + Item::Use(_) => None, + _ => unreachable!(), + } +} + +fn fn_arg_name(fn_arg: &FnArg) -> &Box { + let name = if let FnArg::Typed(t) = fn_arg { + &t.pat + } else { + unreachable!() + }; + name +} + +fn curesult_display_trait(derive_state: &DeriveDisplayState) -> syn::Item { + let errors = derive_state.result_variants.iter().filter_map(|const_| { + let prefix = "cudaError_enum_"; + let text = &const_.ident.to_string()[prefix.len()..]; + if text == "CUDA_SUCCESS" { + return None; + } + let expr = &const_.expr; + Some(quote! { + #expr => writer.write_all(#text.as_bytes()), + }) + }); + parse_quote! 
{ + impl crate::format::CudaDisplay for cuda_types::CUresult { + fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl std::io::Write + ?Sized)) -> std::io::Result<()> { + match self { + Ok(()) => writer.write_all(b"CUDA_SUCCESS"), + Err(err) => { + match err.0.get() { + #(#errors)* + err => write!(writer, "{}", err) + } + } + } + } + } + } +} diff --git a/zluda_dump/Cargo.toml b/zluda_dump/Cargo.toml index 79f7b760..50e7c2a6 100644 --- a/zluda_dump/Cargo.toml +++ b/zluda_dump/Cargo.toml @@ -2,7 +2,7 @@ name = "zluda_dump" version = "0.0.0" authors = ["Andrzej Janik "] -edition = "2018" +edition = "2021" [lib] name = "zluda_dump" @@ -10,6 +10,7 @@ crate-type = ["cdylib"] [dependencies] ptx = { path = "../ptx" } +ptx_parser = { path = "../ptx_parser" } lz4-sys = "1.9" regex = "1.4" dynasm = "1.2" diff --git a/zluda_dump/src/dark_api.rs b/zluda_dump/src/dark_api.rs index 8b1cd797..623f96fb 100644 --- a/zluda_dump/src/dark_api.rs +++ b/zluda_dump/src/dark_api.rs @@ -28,6 +28,7 @@ impl Hash for CUuuidWrapper { } } +#[allow(improper_ctypes_definitions)] pub(crate) struct OriginalExports { original_get_module_from_cubin: Option< unsafe extern "system" fn( @@ -356,6 +357,7 @@ unsafe fn record_submodules_from_fatbin( ); } +#[allow(improper_ctypes_definitions)] unsafe extern "system" fn get_module_from_cubin( module: *mut CUmodule, fatbinc_wrapper: *const FatbincWrapper, @@ -388,6 +390,7 @@ unsafe extern "system" fn get_module_from_cubin( ) } +#[allow(improper_ctypes_definitions)] unsafe extern "system" fn get_module_from_cubin_ext1( module: *mut CUmodule, fatbinc_wrapper: *const FatbincWrapper, @@ -451,6 +454,7 @@ unsafe extern "system" fn get_module_from_cubin_ext1( ) } +#[allow(improper_ctypes_definitions)] unsafe extern "system" fn get_module_from_cubin_ext2( fatbin_header: *const FatbinHeader, module: *mut CUmodule, @@ -508,7 +512,7 @@ unsafe extern "system" fn get_module_from_cubin_ext2( .original_get_module_from_cubin_ext2 .unwrap()(fatbin_header, module, ptr1, ptr2, _unknown); fn_logger.result = Some(result); - if result != CUresult::CUDA_SUCCESS { + if result.is_err() { return result; } record_submodules_from_fatbin( diff --git a/zluda_dump/src/format.rs b/zluda_dump/src/format.rs index 380e52db..c1aac616 100644 --- a/zluda_dump/src/format.rs +++ b/zluda_dump/src/format.rs @@ -1,11 +1,10 @@ +use cuda_types::{CUGLDeviceList, CUdevice}; use std::{ ffi::{c_void, CStr}, fmt::LowerHex, mem, ptr, slice, }; -use cuda_base::cuda_derive_display_trait; - pub(crate) trait CudaDisplay { fn write( &self, @@ -27,28 +26,6 @@ impl CudaDisplay for cuda_types::CUuuid { } } -impl CudaDisplay for cuda_types::CUdevice { - fn write( - &self, - _fn_name: &'static str, - _index: usize, - writer: &mut (impl std::io::Write + ?Sized), - ) -> std::io::Result<()> { - write!(writer, "{}", self.0) - } -} - -impl CudaDisplay for cuda_types::CUdeviceptr { - fn write( - &self, - _fn_name: &'static str, - _index: usize, - writer: &mut (impl std::io::Write + ?Sized), - ) -> std::io::Result<()> { - write!(writer, "{:p}", self.0) - } -} - impl CudaDisplay for cuda_types::CUdeviceptr_v1 { fn write( &self, @@ -494,6 +471,59 @@ impl CudaDisplay } } +impl CudaDisplay for cuda_types::CUgraphNodeParams_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() + } +} + +impl CudaDisplay for cuda_types::CUlaunchConfig_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl 
std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() + } +} + +impl CudaDisplay for cuda_types::CUeglFrame_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() + } +} + +impl CudaDisplay for cuda_types::CUdevResource_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() + } +} +impl CudaDisplay for cuda_types::CUlaunchAttribute_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() + } +} impl CudaDisplay for *mut T { fn write( &self, @@ -544,34 +574,26 @@ impl CudaDisplay for [T; N] { } } -#[allow(non_snake_case)] -pub fn write_cuStreamBatchMemOp( - writer: &mut (impl std::io::Write + ?Sized), - stream: cuda_types::CUstream, - count: ::std::os::raw::c_uint, - paramArray: *mut cuda_types::CUstreamBatchMemOpParams, - flags: ::std::os::raw::c_uint, -) -> std::io::Result<()> { - writer.write_all(b"(stream: ")?; - CudaDisplay::write(&stream, "cuStreamBatchMemOp", 0, writer)?; - writer.write_all(b", ")?; - writer.write_all(b"count: ")?; - CudaDisplay::write(&count, "cuStreamBatchMemOp", 1, writer)?; - writer.write_all(b", paramArray: [")?; - for i in 0..count { - if i != 0 { - writer.write_all(b", ")?; - } - CudaDisplay::write( - &unsafe { paramArray.add(i as usize) }, - "cuStreamBatchMemOp", - 2, - writer, - )?; +impl CudaDisplay for cuda_types::CUarrayMapInfo_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() + } +} + +impl CudaDisplay for cuda_types::CUexecAffinityParam_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + _writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + todo!() } - writer.write_all(b"], flags: ")?; - CudaDisplay::write(&flags, "cuStreamBatchMemOp", 3, writer)?; - writer.write_all(b") ") } #[allow(non_snake_case)] @@ -585,27 +607,7 @@ pub fn write_cuGraphKernelNodeGetAttribute( CudaDisplay::write(&hNode, "cuGraphKernelNodeGetAttribute", 0, writer)?; writer.write_all(b", attr: ")?; CudaDisplay::write(&attr, "cuGraphKernelNodeGetAttribute", 1, writer)?; - match attr { - cuda_types::CUkernelNodeAttrID::CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW => { - writer.write_all(b", value_out: ")?; - CudaDisplay::write( - unsafe { &(*value_out).accessPolicyWindow }, - "cuGraphKernelNodeGetAttribute", - 2, - writer, - )?; - } - cuda_types::CUkernelNodeAttrID::CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE => { - writer.write_all(b", value_out: ")?; - CudaDisplay::write( - unsafe { &(*value_out).cooperative }, - "cuGraphKernelNodeGetAttribute", - 2, - writer, - )?; - } - _ => return writer.write_all(b", ...) 
"), - } + write_launch_attribute(writer, "cuGraphKernelNodeGetAttribute", 2, attr, value_out)?; writer.write_all(b") ") } @@ -630,28 +632,73 @@ pub fn write_cuStreamGetAttribute( CudaDisplay::write(&hStream, "cuStreamGetAttribute", 0, writer)?; writer.write_all(b", attr: ")?; CudaDisplay::write(&attr, "cuStreamGetAttribute", 1, writer)?; - match attr { - cuda_types::CUstreamAttrID::CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW => { + write_launch_attribute(writer, "cuStreamGetAttribute", 2, attr, value_out)?; + writer.write_all(b") ") +} + +fn write_launch_attribute( + writer: &mut (impl std::io::Write + ?Sized), + fn_name: &'static str, + index: usize, + attribute: cuda_types::CUlaunchAttributeID, + value_out: *mut cuda_types::CUstreamAttrValue, +) -> std::io::Result<()> { + match attribute { + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW => { writer.write_all(b", value_out: ")?; CudaDisplay::write( unsafe { &(*value_out).accessPolicyWindow }, - "cuStreamGetAttribute", - 2, + fn_name, + index, writer, - )?; + ) } - cuda_types::CUstreamAttrID::CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY => { + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_COOPERATIVE => { writer.write_all(b", value_out: ")?; - CudaDisplay::write( - unsafe { &(*value_out).syncPolicy }, - "cuStreamGetAttribute", - 2, - writer, - )?; + CudaDisplay::write(unsafe { &(*value_out).cooperative }, fn_name, index, writer) } - _ => return writer.write_all(b", ...) "), + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).syncPolicy }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).clusterDim }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).clusterSchedulingPolicyPreference }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).programmaticStreamSerializationAllowed }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).programmaticEvent }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_PRIORITY => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).priority }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).memSyncDomainMap }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).memSyncDomain }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT => { + writer.write_all(b", value_out: ")?; + CudaDisplay::write(unsafe { &(*value_out).launchCompletionEvent }, fn_name, index, writer) + } + cuda_types::CUlaunchAttributeID::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE => { + writer.write_all(b", value_out: ")?; + 
CudaDisplay::write(unsafe { &(*value_out).deviceUpdatableKernelNode }, fn_name, index, writer) + } + _ => writer.write_all(b", ... "), } - writer.write_all(b") ") } #[allow(non_snake_case)] @@ -685,71 +732,27 @@ pub fn write_cuStreamSetAttribute_ptsz( } #[allow(non_snake_case)] -pub fn write_cuCtxCreate_v3( +pub fn write_cuGLGetDevices( _writer: &mut (impl std::io::Write + ?Sized), - _pctx: *mut cuda_types::CUcontext, - _paramsArray: *mut cuda_types::CUexecAffinityParam, - _numParams: ::std::os::raw::c_int, - _flags: ::std::os::raw::c_uint, - _dev: cuda_types::CUdevice, + _pCudaDeviceCount: *mut ::std::os::raw::c_uint, + _pCudaDevices: *mut CUdevice, + _cudaDeviceCount: ::std::os::raw::c_uint, + _deviceList: CUGLDeviceList, ) -> std::io::Result<()> { todo!() } #[allow(non_snake_case)] -pub fn write_cuCtxGetExecAffinity( +pub fn write_cuGLGetDevices_v2( _writer: &mut (impl std::io::Write + ?Sized), - _pExecAffinity: *mut cuda_types::CUexecAffinityParam, - _type_: cuda_types::CUexecAffinityType, + _pCudaDeviceCount: *mut ::std::os::raw::c_uint, + _pCudaDevices: *mut CUdevice, + _cudaDeviceCount: ::std::os::raw::c_uint, + _deviceList: CUGLDeviceList, ) -> std::io::Result<()> { todo!() } -#[allow(non_snake_case)] -pub fn write_cuMemMapArrayAsync( - _writer: &mut (impl std::io::Write + ?Sized), - _mapInfoList: *mut cuda_types::CUarrayMapInfo, - _count: ::std::os::raw::c_uint, - _hStream: cuda_types::CUstream, -) -> std::io::Result<()> { - todo!() -} - -#[allow(non_snake_case)] -pub fn write_cuMemMapArrayAsync_ptsz( - writer: &mut (impl std::io::Write + ?Sized), - mapInfoList: *mut cuda_types::CUarrayMapInfo, - count: ::std::os::raw::c_uint, - hStream: cuda_types::CUstream, -) -> std::io::Result<()> { - write_cuMemMapArrayAsync(writer, mapInfoList, count, hStream) -} - -cuda_derive_display_trait!( - cuda_types, - CudaDisplay, - [ - CUarrayMapInfo_st, - CUDA_RESOURCE_DESC_st, - CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st, - CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st, - CUexecAffinityParam_st, - CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st, - CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st, - CUuuid_st, - HGPUNV - ], - [ - cuCtxCreate_v3, - cuCtxGetExecAffinity, - cuGraphKernelNodeGetAttribute, - cuGraphKernelNodeSetAttribute, - cuMemMapArrayAsync, - cuMemMapArrayAsync_ptsz, - cuStreamBatchMemOp, - cuStreamGetAttribute, - cuStreamGetAttribute_ptsz, - cuStreamSetAttribute, - cuStreamSetAttribute_ptsz - ] -); +#[path = "format_generated.rs"] +mod format_generated; +pub(crate) use format_generated::*; diff --git a/zluda_dump/src/format_generated.rs b/zluda_dump/src/format_generated.rs new file mode 100644 index 00000000..742fb576 --- /dev/null +++ b/zluda_dump/src/format_generated.rs @@ -0,0 +1,24916 @@ +// Generated automatically by zluda_bindgen +// DO NOT EDIT MANUALLY +#![allow(warnings)] +impl crate::format::CudaDisplay for cuda_types::CUdeviceptr_v2 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", self.0) + } +} +impl crate::format::CudaDisplay for cuda_types::CUcontext { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", self.0) + } +} +impl crate::format::CudaDisplay for cuda_types::CUmodule { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + 
write!(writer, "{:p}", self.0) + } +} +impl crate::format::CudaDisplay for cuda_types::CUfunction { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", self.0) + } +} +impl crate::format::CudaDisplay for cuda_types::CUlibrary { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", self.0) + } +} +impl crate::format::CudaDisplay for cuda_types::CUkernel { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUarray { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUmipmappedArray { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUtexref { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUsurfref { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUevent { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUstream { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", self.0) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphicsResource { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUexternalMemory { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphore { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraph { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphNode { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphExec { + fn write( + &self, + _fn_name: &'static str, + _index: usize, 
+ writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemoryPool { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUuserObject { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphDeviceNode { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUasyncCallbackHandle { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemFabricHandle_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.data, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUipcMem_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUipcMem_flags_enum::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS => { + writer + .write_all(stringify!(CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAttach_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemAttach_flags_enum::CU_MEM_ATTACH_GLOBAL => { + writer.write_all(stringify!(CU_MEM_ATTACH_GLOBAL).as_bytes()) + } + &cuda_types::CUmemAttach_flags_enum::CU_MEM_ATTACH_HOST => { + writer.write_all(stringify!(CU_MEM_ATTACH_HOST).as_bytes()) + } + &cuda_types::CUmemAttach_flags_enum::CU_MEM_ATTACH_SINGLE => { + writer.write_all(stringify!(CU_MEM_ATTACH_SINGLE).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUctx_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_AUTO => { + writer.write_all(stringify!(CU_CTX_SCHED_AUTO).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_SPIN => { + writer.write_all(stringify!(CU_CTX_SCHED_SPIN).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_YIELD => { + writer.write_all(stringify!(CU_CTX_SCHED_YIELD).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_BLOCKING_SYNC => { + writer.write_all(stringify!(CU_CTX_SCHED_BLOCKING_SYNC).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_BLOCKING_SYNC => { + writer.write_all(stringify!(CU_CTX_BLOCKING_SYNC).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_SCHED_MASK => { + 
writer.write_all(stringify!(CU_CTX_SCHED_MASK).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_MAP_HOST => { + writer.write_all(stringify!(CU_CTX_MAP_HOST).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_LMEM_RESIZE_TO_MAX => { + writer.write_all(stringify!(CU_CTX_LMEM_RESIZE_TO_MAX).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_COREDUMP_ENABLE => { + writer.write_all(stringify!(CU_CTX_COREDUMP_ENABLE).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_USER_COREDUMP_ENABLE => { + writer.write_all(stringify!(CU_CTX_USER_COREDUMP_ENABLE).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_SYNC_MEMOPS => { + writer.write_all(stringify!(CU_CTX_SYNC_MEMOPS).as_bytes()) + } + &cuda_types::CUctx_flags_enum::CU_CTX_FLAGS_MASK => { + writer.write_all(stringify!(CU_CTX_FLAGS_MASK).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUevent_sched_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_AUTO => { + writer.write_all(stringify!(CU_EVENT_SCHED_AUTO).as_bytes()) + } + &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_SPIN => { + writer.write_all(stringify!(CU_EVENT_SCHED_SPIN).as_bytes()) + } + &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_YIELD => { + writer.write_all(stringify!(CU_EVENT_SCHED_YIELD).as_bytes()) + } + &cuda_types::CUevent_sched_flags_enum::CU_EVENT_SCHED_BLOCKING_SYNC => { + writer.write_all(stringify!(CU_EVENT_SCHED_BLOCKING_SYNC).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUstream_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstream_flags_enum::CU_STREAM_DEFAULT => { + writer.write_all(stringify!(CU_STREAM_DEFAULT).as_bytes()) + } + &cuda_types::CUstream_flags_enum::CU_STREAM_NON_BLOCKING => { + writer.write_all(stringify!(CU_STREAM_NON_BLOCKING).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUevent_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUevent_flags_enum::CU_EVENT_DEFAULT => { + writer.write_all(stringify!(CU_EVENT_DEFAULT).as_bytes()) + } + &cuda_types::CUevent_flags_enum::CU_EVENT_BLOCKING_SYNC => { + writer.write_all(stringify!(CU_EVENT_BLOCKING_SYNC).as_bytes()) + } + &cuda_types::CUevent_flags_enum::CU_EVENT_DISABLE_TIMING => { + writer.write_all(stringify!(CU_EVENT_DISABLE_TIMING).as_bytes()) + } + &cuda_types::CUevent_flags_enum::CU_EVENT_INTERPROCESS => { + writer.write_all(stringify!(CU_EVENT_INTERPROCESS).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUevent_record_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUevent_record_flags_enum::CU_EVENT_RECORD_DEFAULT => { + writer.write_all(stringify!(CU_EVENT_RECORD_DEFAULT).as_bytes()) + } + &cuda_types::CUevent_record_flags_enum::CU_EVENT_RECORD_EXTERNAL => { + writer.write_all(stringify!(CU_EVENT_RECORD_EXTERNAL).as_bytes()) + } 
+ _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUevent_wait_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUevent_wait_flags_enum::CU_EVENT_WAIT_DEFAULT => { + writer.write_all(stringify!(CU_EVENT_WAIT_DEFAULT).as_bytes()) + } + &cuda_types::CUevent_wait_flags_enum::CU_EVENT_WAIT_EXTERNAL => { + writer.write_all(stringify!(CU_EVENT_WAIT_EXTERNAL).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUstreamWaitValue_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_GEQ => { + writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_GEQ).as_bytes()) + } + &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_EQ => { + writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_EQ).as_bytes()) + } + &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_AND => { + writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_AND).as_bytes()) + } + &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_NOR => { + writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_NOR).as_bytes()) + } + &cuda_types::CUstreamWaitValue_flags_enum::CU_STREAM_WAIT_VALUE_FLUSH => { + writer.write_all(stringify!(CU_STREAM_WAIT_VALUE_FLUSH).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUstreamWriteValue_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamWriteValue_flags_enum::CU_STREAM_WRITE_VALUE_DEFAULT => { + writer.write_all(stringify!(CU_STREAM_WRITE_VALUE_DEFAULT).as_bytes()) + } + &cuda_types::CUstreamWriteValue_flags_enum::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER => { + writer + .write_all( + stringify!(CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUstreamBatchMemOpType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WAIT_VALUE_32 => { + writer.write_all(stringify!(CU_STREAM_MEM_OP_WAIT_VALUE_32).as_bytes()) + } + &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WRITE_VALUE_32 => { + writer.write_all(stringify!(CU_STREAM_MEM_OP_WRITE_VALUE_32).as_bytes()) + } + &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WAIT_VALUE_64 => { + writer.write_all(stringify!(CU_STREAM_MEM_OP_WAIT_VALUE_64).as_bytes()) + } + &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_WRITE_VALUE_64 => { + writer.write_all(stringify!(CU_STREAM_MEM_OP_WRITE_VALUE_64).as_bytes()) + } + &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_BARRIER => { + writer.write_all(stringify!(CU_STREAM_MEM_OP_BARRIER).as_bytes()) + } + &cuda_types::CUstreamBatchMemOpType_enum::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES => { + writer + .write_all( + stringify!(CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for 
cuda_types::CUstreamMemoryBarrier_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamMemoryBarrier_flags_enum::CU_STREAM_MEMORY_BARRIER_TYPE_SYS => { + writer + .write_all(stringify!(CU_STREAM_MEMORY_BARRIER_TYPE_SYS).as_bytes()) + } + &cuda_types::CUstreamMemoryBarrier_flags_enum::CU_STREAM_MEMORY_BARRIER_TYPE_GPU => { + writer + .write_all(stringify!(CU_STREAM_MEMORY_BARRIER_TYPE_GPU).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(operation), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.operation, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUstreamBatchMemOpParams_union_CUstreamMemOpMemoryBarrierParams_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(operation), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.operation, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS_v1_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.ctx, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.count, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(paramArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.paramArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.ctx, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.count, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(paramArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.paramArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUoccupancy_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl 
std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUoccupancy_flags_enum::CU_OCCUPANCY_DEFAULT => { + writer.write_all(stringify!(CU_OCCUPANCY_DEFAULT).as_bytes()) + } + &cuda_types::CUoccupancy_flags_enum::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE => { + writer + .write_all( + stringify!(CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUstreamUpdateCaptureDependencies_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamUpdateCaptureDependencies_flags_enum::CU_STREAM_ADD_CAPTURE_DEPENDENCIES => { + writer + .write_all(stringify!(CU_STREAM_ADD_CAPTURE_DEPENDENCIES).as_bytes()) + } + &cuda_types::CUstreamUpdateCaptureDependencies_flags_enum::CU_STREAM_SET_CAPTURE_DEPENDENCIES => { + writer + .write_all(stringify!(CU_STREAM_SET_CAPTURE_DEPENDENCIES).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUasyncNotificationType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUasyncNotificationType_enum::CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET => { + writer + .write_all( + stringify!(CU_ASYNC_NOTIFICATION_TYPE_OVER_BUDGET).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUasyncNotificationInfo_st__bindgen_ty_1__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(bytesOverBudget), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.bytesOverBudget, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUasyncCallback { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!( + writer, + "{:p}", + unsafe { + std::mem::transmute::< + cuda_types::CUasyncCallback, + *mut ::std::ffi::c_void, + >(*self) + }, + ) + } +} +impl crate::format::CudaDisplay for cuda_types::CUarray_format_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT8 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNSIGNED_INT8).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT16 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNSIGNED_INT16).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNSIGNED_INT32 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNSIGNED_INT32).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT8 => { + writer.write_all(stringify!(CU_AD_FORMAT_SIGNED_INT8).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT16 => { + writer.write_all(stringify!(CU_AD_FORMAT_SIGNED_INT16).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SIGNED_INT32 => { + writer.write_all(stringify!(CU_AD_FORMAT_SIGNED_INT32).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_HALF => { + 
writer.write_all(stringify!(CU_AD_FORMAT_HALF).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_FLOAT => { + writer.write_all(stringify!(CU_AD_FORMAT_FLOAT).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_NV12 => { + writer.write_all(stringify!(CU_AD_FORMAT_NV12).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X1 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT8X1).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X2 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT8X2).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT8X4 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT8X4).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X1 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT16X1).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X2 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT16X2).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_UNORM_INT16X4 => { + writer.write_all(stringify!(CU_AD_FORMAT_UNORM_INT16X4).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X1 => { + writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT8X1).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X2 => { + writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT8X2).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT8X4 => { + writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT8X4).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X1 => { + writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT16X1).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X2 => { + writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT16X2).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_SNORM_INT16X4 => { + writer.write_all(stringify!(CU_AD_FORMAT_SNORM_INT16X4).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC1_UNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC1_UNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC1_UNORM_SRGB => { + writer.write_all(stringify!(CU_AD_FORMAT_BC1_UNORM_SRGB).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC2_UNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC2_UNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC2_UNORM_SRGB => { + writer.write_all(stringify!(CU_AD_FORMAT_BC2_UNORM_SRGB).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC3_UNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC3_UNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC3_UNORM_SRGB => { + writer.write_all(stringify!(CU_AD_FORMAT_BC3_UNORM_SRGB).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC4_UNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC4_UNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC4_SNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC4_SNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC5_UNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC5_UNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC5_SNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC5_SNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC6H_UF16 => { + writer.write_all(stringify!(CU_AD_FORMAT_BC6H_UF16).as_bytes()) + } + 
&cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC6H_SF16 => { + writer.write_all(stringify!(CU_AD_FORMAT_BC6H_SF16).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC7_UNORM => { + writer.write_all(stringify!(CU_AD_FORMAT_BC7_UNORM).as_bytes()) + } + &cuda_types::CUarray_format_enum::CU_AD_FORMAT_BC7_UNORM_SRGB => { + writer.write_all(stringify!(CU_AD_FORMAT_BC7_UNORM_SRGB).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUaddress_mode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_WRAP => { + writer.write_all(stringify!(CU_TR_ADDRESS_MODE_WRAP).as_bytes()) + } + &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_CLAMP => { + writer.write_all(stringify!(CU_TR_ADDRESS_MODE_CLAMP).as_bytes()) + } + &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_MIRROR => { + writer.write_all(stringify!(CU_TR_ADDRESS_MODE_MIRROR).as_bytes()) + } + &cuda_types::CUaddress_mode_enum::CU_TR_ADDRESS_MODE_BORDER => { + writer.write_all(stringify!(CU_TR_ADDRESS_MODE_BORDER).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUfilter_mode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUfilter_mode_enum::CU_TR_FILTER_MODE_POINT => { + writer.write_all(stringify!(CU_TR_FILTER_MODE_POINT).as_bytes()) + } + &cuda_types::CUfilter_mode_enum::CU_TR_FILTER_MODE_LINEAR => { + writer.write_all(stringify!(CU_TR_FILTER_MODE_LINEAR).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUdevice_attribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK => { 
+ writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_WARP_SIZE => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_WARP_SIZE).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_PITCH => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX_PITCH).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CLOCK_RATE => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_CLOCK_RATE).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_GPU_OVERLAP).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_INTEGRATED => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_INTEGRATED).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_MODE).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH) + .as_bytes(), + ) + } + 
&cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_ECC_ENABLED => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_ECC_ENABLED).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_PCI_BUS_ID).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TCC_DRIVER => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_TCC_DRIVER).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH) + .as_bytes(), + ) + } + 
&cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH => { + writer + .write_all( + 
stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH => { + writer + 
.write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1) + .as_bytes(), + ) + } + 
&cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED => { + writer + .write_all( + stringify!( + 
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH => { + writer + .write_all(stringify!(CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED => { + writer + .write_all( + stringify!( + CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED + ) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED) + .as_bytes(), + ) + } + 
&cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS) + .as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_NUMA_CONFIG => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_NUMA_CONFIG).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_NUMA_ID => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_NUMA_ID).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED).as_bytes(), + ) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MPS_ENABLED => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MPS_ENABLED).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID).as_bytes()) + } + &cuda_types::CUdevice_attribute_enum::CU_DEVICE_ATTRIBUTE_MAX => { + writer.write_all(stringify!(CU_DEVICE_ATTRIBUTE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUdevprop_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer + .write_all(concat!("{ ", stringify!(maxThreadsPerBlock), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.maxThreadsPerBlock, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(maxThreadsDim), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.maxThreadsDim, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(maxGridSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.maxGridSize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(sharedMemPerBlock), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.sharedMemPerBlock, "", 0, writer)?; + writer + .write_all(concat!(", ", stringify!(totalConstantMemory), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.totalConstantMemory, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(SIMDWidth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.SIMDWidth, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(memPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.memPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(regsPerBlock), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.regsPerBlock, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(clockRate), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.clockRate, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(textureAlign), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.textureAlign, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUpointer_attribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_CONTEXT => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_CONTEXT).as_bytes()) + } + 
&cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMORY_TYPE => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_MEMORY_TYPE).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_DEVICE_POINTER => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_DEVICE_POINTER).as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_HOST_POINTER => { + writer + .write_all(stringify!(CU_POINTER_ATTRIBUTE_HOST_POINTER).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_P2P_TOKENS => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_P2P_TOKENS).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_SYNC_MEMOPS).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_BUFFER_ID => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_BUFFER_ID).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_MANAGED => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_IS_MANAGED).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL).as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE) + .as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_RANGE_START_ADDR).as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_RANGE_SIZE => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_RANGE_SIZE).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPED => { + writer.write_all(stringify!(CU_POINTER_ATTRIBUTE_MAPPED).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES).as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE) + .as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAGS => { + writer + .write_all(stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAGS).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE).as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPING_SIZE => { + writer + .write_all(stringify!(CU_POINTER_ATTRIBUTE_MAPPING_SIZE).as_bytes()) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR).as_bytes(), + ) + } + &cuda_types::CUpointer_attribute_enum::CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUfunction_attribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + 
&cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK).as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES).as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES => { + writer + .write_all(stringify!(CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES).as_bytes()) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES => { + writer + .write_all(stringify!(CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES).as_bytes()) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_NUM_REGS => { + writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_NUM_REGS).as_bytes()) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_PTX_VERSION => { + writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_PTX_VERSION).as_bytes()) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_BINARY_VERSION => { + writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_BINARY_VERSION).as_bytes()) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CACHE_MODE_CA => { + writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_CACHE_MODE_CA).as_bytes()) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES) + .as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT) + .as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET).as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH).as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT).as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH).as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED => { + writer + .write_all( + stringify!(CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED) + .as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => { + writer + .write_all( + stringify!( + CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + ) + .as_bytes(), + ) + } + &cuda_types::CUfunction_attribute_enum::CU_FUNC_ATTRIBUTE_MAX => { + writer.write_all(stringify!(CU_FUNC_ATTRIBUTE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUfunc_cache_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_NONE => { + writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_NONE).as_bytes()) + } + &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_SHARED => { + 
writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_SHARED).as_bytes()) + } + &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_L1 => { + writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_L1).as_bytes()) + } + &cuda_types::CUfunc_cache_enum::CU_FUNC_CACHE_PREFER_EQUAL => { + writer.write_all(stringify!(CU_FUNC_CACHE_PREFER_EQUAL).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUsharedconfig_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE => { + writer + .write_all( + stringify!(CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE).as_bytes(), + ) + } + &cuda_types::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE => { + writer + .write_all( + stringify!(CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE).as_bytes(), + ) + } + &cuda_types::CUsharedconfig_enum::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE => { + writer + .write_all( + stringify!(CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUshared_carveout_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_DEFAULT => { + writer.write_all(stringify!(CU_SHAREDMEM_CARVEOUT_DEFAULT).as_bytes()) + } + &cuda_types::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_MAX_SHARED => { + writer.write_all(stringify!(CU_SHAREDMEM_CARVEOUT_MAX_SHARED).as_bytes()) + } + &cuda_types::CUshared_carveout_enum::CU_SHAREDMEM_CARVEOUT_MAX_L1 => { + writer.write_all(stringify!(CU_SHAREDMEM_CARVEOUT_MAX_L1).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemorytype_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_HOST => { + writer.write_all(stringify!(CU_MEMORYTYPE_HOST).as_bytes()) + } + &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_DEVICE => { + writer.write_all(stringify!(CU_MEMORYTYPE_DEVICE).as_bytes()) + } + &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_ARRAY => { + writer.write_all(stringify!(CU_MEMORYTYPE_ARRAY).as_bytes()) + } + &cuda_types::CUmemorytype_enum::CU_MEMORYTYPE_UNIFIED => { + writer.write_all(stringify!(CU_MEMORYTYPE_UNIFIED).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUcomputemode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUcomputemode_enum::CU_COMPUTEMODE_DEFAULT => { + writer.write_all(stringify!(CU_COMPUTEMODE_DEFAULT).as_bytes()) + } + &cuda_types::CUcomputemode_enum::CU_COMPUTEMODE_PROHIBITED => { + writer.write_all(stringify!(CU_COMPUTEMODE_PROHIBITED).as_bytes()) + } + &cuda_types::CUcomputemode_enum::CU_COMPUTEMODE_EXCLUSIVE_PROCESS => { + writer.write_all(stringify!(CU_COMPUTEMODE_EXCLUSIVE_PROCESS).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmem_advise_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + 
writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_SET_READ_MOSTLY => { + writer.write_all(stringify!(CU_MEM_ADVISE_SET_READ_MOSTLY).as_bytes()) + } + &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_READ_MOSTLY => { + writer.write_all(stringify!(CU_MEM_ADVISE_UNSET_READ_MOSTLY).as_bytes()) + } + &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_SET_PREFERRED_LOCATION => { + writer + .write_all( + stringify!(CU_MEM_ADVISE_SET_PREFERRED_LOCATION).as_bytes(), + ) + } + &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION => { + writer + .write_all( + stringify!(CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION).as_bytes(), + ) + } + &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_SET_ACCESSED_BY => { + writer.write_all(stringify!(CU_MEM_ADVISE_SET_ACCESSED_BY).as_bytes()) + } + &cuda_types::CUmem_advise_enum::CU_MEM_ADVISE_UNSET_ACCESSED_BY => { + writer.write_all(stringify!(CU_MEM_ADVISE_UNSET_ACCESSED_BY).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmem_range_attribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY => { + writer + .write_all(stringify!(CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY).as_bytes()) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION => { + writer + .write_all( + stringify!(CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION).as_bytes(), + ) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY => { + writer + .write_all(stringify!(CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY).as_bytes()) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION => { + writer + .write_all( + stringify!(CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION) + .as_bytes(), + ) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE => { + writer + .write_all( + stringify!(CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_TYPE) + .as_bytes(), + ) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID => { + writer + .write_all( + stringify!(CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION_ID) + .as_bytes(), + ) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE => { + writer + .write_all( + stringify!(CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_TYPE) + .as_bytes(), + ) + } + &cuda_types::CUmem_range_attribute_enum::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID => { + writer + .write_all( + stringify!(CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION_ID) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUjit_option_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUjit_option_enum::CU_JIT_MAX_REGISTERS => { + writer.write_all(stringify!(CU_JIT_MAX_REGISTERS).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_THREADS_PER_BLOCK => { + writer.write_all(stringify!(CU_JIT_THREADS_PER_BLOCK).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_WALL_TIME => { + writer.write_all(stringify!(CU_JIT_WALL_TIME).as_bytes()) + } + 
&cuda_types::CUjit_option_enum::CU_JIT_INFO_LOG_BUFFER => { + writer.write_all(stringify!(CU_JIT_INFO_LOG_BUFFER).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES => { + writer + .write_all(stringify!(CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_ERROR_LOG_BUFFER => { + writer.write_all(stringify!(CU_JIT_ERROR_LOG_BUFFER).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES => { + writer + .write_all(stringify!(CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_OPTIMIZATION_LEVEL => { + writer.write_all(stringify!(CU_JIT_OPTIMIZATION_LEVEL).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_TARGET_FROM_CUCONTEXT => { + writer.write_all(stringify!(CU_JIT_TARGET_FROM_CUCONTEXT).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_TARGET => { + writer.write_all(stringify!(CU_JIT_TARGET).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_FALLBACK_STRATEGY => { + writer.write_all(stringify!(CU_JIT_FALLBACK_STRATEGY).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_GENERATE_DEBUG_INFO => { + writer.write_all(stringify!(CU_JIT_GENERATE_DEBUG_INFO).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_LOG_VERBOSE => { + writer.write_all(stringify!(CU_JIT_LOG_VERBOSE).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_GENERATE_LINE_INFO => { + writer.write_all(stringify!(CU_JIT_GENERATE_LINE_INFO).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_CACHE_MODE => { + writer.write_all(stringify!(CU_JIT_CACHE_MODE).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_NEW_SM3X_OPT => { + writer.write_all(stringify!(CU_JIT_NEW_SM3X_OPT).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_FAST_COMPILE => { + writer.write_all(stringify!(CU_JIT_FAST_COMPILE).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_NAMES => { + writer.write_all(stringify!(CU_JIT_GLOBAL_SYMBOL_NAMES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_ADDRESSES => { + writer.write_all(stringify!(CU_JIT_GLOBAL_SYMBOL_ADDRESSES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_GLOBAL_SYMBOL_COUNT => { + writer.write_all(stringify!(CU_JIT_GLOBAL_SYMBOL_COUNT).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_LTO => { + writer.write_all(stringify!(CU_JIT_LTO).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_FTZ => { + writer.write_all(stringify!(CU_JIT_FTZ).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_PREC_DIV => { + writer.write_all(stringify!(CU_JIT_PREC_DIV).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_PREC_SQRT => { + writer.write_all(stringify!(CU_JIT_PREC_SQRT).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_FMA => { + writer.write_all(stringify!(CU_JIT_FMA).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_KERNEL_NAMES => { + writer.write_all(stringify!(CU_JIT_REFERENCED_KERNEL_NAMES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_KERNEL_COUNT => { + writer.write_all(stringify!(CU_JIT_REFERENCED_KERNEL_COUNT).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_VARIABLE_NAMES => { + writer.write_all(stringify!(CU_JIT_REFERENCED_VARIABLE_NAMES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_REFERENCED_VARIABLE_COUNT => { + writer.write_all(stringify!(CU_JIT_REFERENCED_VARIABLE_COUNT).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES => 
{ + writer + .write_all( + stringify!(CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES).as_bytes(), + ) + } + &cuda_types::CUjit_option_enum::CU_JIT_POSITION_INDEPENDENT_CODE => { + writer.write_all(stringify!(CU_JIT_POSITION_INDEPENDENT_CODE).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_MIN_CTA_PER_SM => { + writer.write_all(stringify!(CU_JIT_MIN_CTA_PER_SM).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_MAX_THREADS_PER_BLOCK => { + writer.write_all(stringify!(CU_JIT_MAX_THREADS_PER_BLOCK).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_OVERRIDE_DIRECTIVE_VALUES => { + writer.write_all(stringify!(CU_JIT_OVERRIDE_DIRECTIVE_VALUES).as_bytes()) + } + &cuda_types::CUjit_option_enum::CU_JIT_NUM_OPTIONS => { + writer.write_all(stringify!(CU_JIT_NUM_OPTIONS).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUjit_target_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_30 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_30).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_32 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_32).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_35 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_35).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_37 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_37).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_50 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_50).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_52 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_52).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_53 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_53).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_60 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_60).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_61 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_61).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_62 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_62).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_70 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_70).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_72 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_72).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_75 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_75).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_80 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_80).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_86 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_86).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_87 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_87).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_89 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_89).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_90 => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_90).as_bytes()) + } + &cuda_types::CUjit_target_enum::CU_TARGET_COMPUTE_90A => { + writer.write_all(stringify!(CU_TARGET_COMPUTE_90A).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} 
+impl crate::format::CudaDisplay for cuda_types::CUjit_fallback_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUjit_fallback_enum::CU_PREFER_PTX => { + writer.write_all(stringify!(CU_PREFER_PTX).as_bytes()) + } + &cuda_types::CUjit_fallback_enum::CU_PREFER_BINARY => { + writer.write_all(stringify!(CU_PREFER_BINARY).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUjit_cacheMode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_NONE => { + writer.write_all(stringify!(CU_JIT_CACHE_OPTION_NONE).as_bytes()) + } + &cuda_types::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_CG => { + writer.write_all(stringify!(CU_JIT_CACHE_OPTION_CG).as_bytes()) + } + &cuda_types::CUjit_cacheMode_enum::CU_JIT_CACHE_OPTION_CA => { + writer.write_all(stringify!(CU_JIT_CACHE_OPTION_CA).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUjitInputType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_CUBIN => { + writer.write_all(stringify!(CU_JIT_INPUT_CUBIN).as_bytes()) + } + &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_PTX => { + writer.write_all(stringify!(CU_JIT_INPUT_PTX).as_bytes()) + } + &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_FATBINARY => { + writer.write_all(stringify!(CU_JIT_INPUT_FATBINARY).as_bytes()) + } + &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_OBJECT => { + writer.write_all(stringify!(CU_JIT_INPUT_OBJECT).as_bytes()) + } + &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_LIBRARY => { + writer.write_all(stringify!(CU_JIT_INPUT_LIBRARY).as_bytes()) + } + &cuda_types::CUjitInputType_enum::CU_JIT_INPUT_NVVM => { + writer.write_all(stringify!(CU_JIT_INPUT_NVVM).as_bytes()) + } + &cuda_types::CUjitInputType_enum::CU_JIT_NUM_INPUT_TYPES => { + writer.write_all(stringify!(CU_JIT_NUM_INPUT_TYPES).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUlinkState { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphicsRegisterFlags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_NONE => { + writer.write_all(stringify!(CU_GRAPHICS_REGISTER_FLAGS_NONE).as_bytes()) + } + &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY => { + writer + .write_all( + stringify!(CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY).as_bytes(), + ) + } + &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD => { + writer + .write_all( + stringify!(CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD).as_bytes(), + ) + } + &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST => { + writer + .write_all( + stringify!(CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST).as_bytes(), 
+ ) + } + &cuda_types::CUgraphicsRegisterFlags_enum::CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER => { + writer + .write_all( + stringify!(CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphicsMapResourceFlags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE => { + writer + .write_all( + stringify!(CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE).as_bytes(), + ) + } + &cuda_types::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY => { + writer + .write_all( + stringify!(CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY).as_bytes(), + ) + } + &cuda_types::CUgraphicsMapResourceFlags_enum::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD => { + writer + .write_all( + stringify!(CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUarray_cubemap_face_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_X => { + writer.write_all(stringify!(CU_CUBEMAP_FACE_POSITIVE_X).as_bytes()) + } + &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_X => { + writer.write_all(stringify!(CU_CUBEMAP_FACE_NEGATIVE_X).as_bytes()) + } + &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_Y => { + writer.write_all(stringify!(CU_CUBEMAP_FACE_POSITIVE_Y).as_bytes()) + } + &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_Y => { + writer.write_all(stringify!(CU_CUBEMAP_FACE_NEGATIVE_Y).as_bytes()) + } + &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_POSITIVE_Z => { + writer.write_all(stringify!(CU_CUBEMAP_FACE_POSITIVE_Z).as_bytes()) + } + &cuda_types::CUarray_cubemap_face_enum::CU_CUBEMAP_FACE_NEGATIVE_Z => { + writer.write_all(stringify!(CU_CUBEMAP_FACE_NEGATIVE_Z).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUlimit_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUlimit_enum::CU_LIMIT_STACK_SIZE => { + writer.write_all(stringify!(CU_LIMIT_STACK_SIZE).as_bytes()) + } + &cuda_types::CUlimit_enum::CU_LIMIT_PRINTF_FIFO_SIZE => { + writer.write_all(stringify!(CU_LIMIT_PRINTF_FIFO_SIZE).as_bytes()) + } + &cuda_types::CUlimit_enum::CU_LIMIT_MALLOC_HEAP_SIZE => { + writer.write_all(stringify!(CU_LIMIT_MALLOC_HEAP_SIZE).as_bytes()) + } + &cuda_types::CUlimit_enum::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH => { + writer.write_all(stringify!(CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH).as_bytes()) + } + &cuda_types::CUlimit_enum::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT => { + writer + .write_all( + stringify!(CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT).as_bytes(), + ) + } + &cuda_types::CUlimit_enum::CU_LIMIT_MAX_L2_FETCH_GRANULARITY => { + writer + .write_all(stringify!(CU_LIMIT_MAX_L2_FETCH_GRANULARITY).as_bytes()) + } + &cuda_types::CUlimit_enum::CU_LIMIT_PERSISTING_L2_CACHE_SIZE => { + writer + .write_all(stringify!(CU_LIMIT_PERSISTING_L2_CACHE_SIZE).as_bytes()) + } + &cuda_types::CUlimit_enum::CU_LIMIT_MAX => { 
+ writer.write_all(stringify!(CU_LIMIT_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUresourcetype_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_ARRAY => { + writer.write_all(stringify!(CU_RESOURCE_TYPE_ARRAY).as_bytes()) + } + &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY => { + writer.write_all(stringify!(CU_RESOURCE_TYPE_MIPMAPPED_ARRAY).as_bytes()) + } + &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_LINEAR => { + writer.write_all(stringify!(CU_RESOURCE_TYPE_LINEAR).as_bytes()) + } + &cuda_types::CUresourcetype_enum::CU_RESOURCE_TYPE_PITCH2D => { + writer.write_all(stringify!(CU_RESOURCE_TYPE_PITCH2D).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUhostFn { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!( + writer, + "{:p}", + unsafe { + std::mem::transmute::< + cuda_types::CUhostFn, + *mut ::std::ffi::c_void, + >(*self) + }, + ) + } +} +impl crate::format::CudaDisplay for cuda_types::CUaccessProperty_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUaccessProperty_enum::CU_ACCESS_PROPERTY_NORMAL => { + writer.write_all(stringify!(CU_ACCESS_PROPERTY_NORMAL).as_bytes()) + } + &cuda_types::CUaccessProperty_enum::CU_ACCESS_PROPERTY_STREAMING => { + writer.write_all(stringify!(CU_ACCESS_PROPERTY_STREAMING).as_bytes()) + } + &cuda_types::CUaccessProperty_enum::CU_ACCESS_PROPERTY_PERSISTING => { + writer.write_all(stringify!(CU_ACCESS_PROPERTY_PERSISTING).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUaccessPolicyWindow_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(base_ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.base_ptr, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(num_bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.num_bytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(hitRatio), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hitRatio, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(hitProp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hitProp, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(missProp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.missProp, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.func, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimY), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&self.gridDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.sharedMemBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.kernelParams, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extra, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.func, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.sharedMemBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.kernelParams, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extra, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(kern), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.kern, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.ctx, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_KERNEL_NODE_PARAMS_v3_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.func, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimX), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&self.gridDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.sharedMemBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.kernelParams, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extra, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(kern), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.kern, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.ctx, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMSET_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dst, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(pitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.pitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.value, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(elementSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.elementSize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.height, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMSET_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dst, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(pitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.pitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.value, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(elementSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.elementSize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.width, "", 0, writer)?; + 
writer.write_all(concat!(", ", stringify!(height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.ctx, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_HOST_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(fn_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.fn_, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.userData, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_HOST_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(fn_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.fn_, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.userData, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphConditionalNodeType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphConditionalNodeType_enum::CU_GRAPH_COND_TYPE_IF => { + writer.write_all(stringify!(CU_GRAPH_COND_TYPE_IF).as_bytes()) + } + &cuda_types::CUgraphConditionalNodeType_enum::CU_GRAPH_COND_TYPE_WHILE => { + writer.write_all(stringify!(CU_GRAPH_COND_TYPE_WHILE).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_CONDITIONAL_NODE_PARAMS { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.handle, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.type_, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.size, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(phGraph_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.phGraph_out, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.ctx, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphNodeType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_KERNEL => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_KERNEL).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEMCPY => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEMCPY).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEMSET => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEMSET).as_bytes()) + } + 
&cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_HOST => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_HOST).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_GRAPH => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_GRAPH).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EMPTY => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_EMPTY).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_WAIT_EVENT => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_WAIT_EVENT).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EVENT_RECORD => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_EVENT_RECORD).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL => { + writer + .write_all( + stringify!(CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL).as_bytes(), + ) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT => { + writer + .write_all(stringify!(CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEM_ALLOC => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEM_ALLOC).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_MEM_FREE => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_MEM_FREE).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_BATCH_MEM_OP => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_BATCH_MEM_OP).as_bytes()) + } + &cuda_types::CUgraphNodeType_enum::CU_GRAPH_NODE_TYPE_CONDITIONAL => { + writer.write_all(stringify!(CU_GRAPH_NODE_TYPE_CONDITIONAL).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphDependencyType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphDependencyType_enum::CU_GRAPH_DEPENDENCY_TYPE_DEFAULT => { + writer.write_all(stringify!(CU_GRAPH_DEPENDENCY_TYPE_DEFAULT).as_bytes()) + } + &cuda_types::CUgraphDependencyType_enum::CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC => { + writer + .write_all( + stringify!(CU_GRAPH_DEPENDENCY_TYPE_PROGRAMMATIC).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphEdgeData_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(from_port), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.from_port, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(to_port), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.to_port, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.type_, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiateResult_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_SUCCESS => { + writer.write_all(stringify!(CUDA_GRAPH_INSTANTIATE_SUCCESS).as_bytes()) + } + &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_ERROR => { + writer.write_all(stringify!(CUDA_GRAPH_INSTANTIATE_ERROR).as_bytes()) + } + 
&cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE => { + writer + .write_all( + stringify!(CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE).as_bytes(), + ) + } + &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED => { + writer + .write_all( + stringify!(CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUgraphInstantiateResult_enum::CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED => { + writer + .write_all( + stringify!(CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(hUploadStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hUploadStream, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(hErrNode_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hErrNode_out, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(result_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.result_out, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUsynchronizationPolicy_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_AUTO => { + writer.write_all(stringify!(CU_SYNC_POLICY_AUTO).as_bytes()) + } + &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_SPIN => { + writer.write_all(stringify!(CU_SYNC_POLICY_SPIN).as_bytes()) + } + &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_YIELD => { + writer.write_all(stringify!(CU_SYNC_POLICY_YIELD).as_bytes()) + } + &cuda_types::CUsynchronizationPolicy_enum::CU_SYNC_POLICY_BLOCKING_SYNC => { + writer.write_all(stringify!(CU_SYNC_POLICY_BLOCKING_SYNC).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUclusterSchedulingPolicy_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_DEFAULT => { + writer + .write_all( + stringify!(CU_CLUSTER_SCHEDULING_POLICY_DEFAULT).as_bytes(), + ) + } + &cuda_types::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_SPREAD => { + writer + .write_all( + stringify!(CU_CLUSTER_SCHEDULING_POLICY_SPREAD).as_bytes(), + ) + } + &cuda_types::CUclusterSchedulingPolicy_enum::CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING => { + writer + .write_all( + stringify!(CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUlaunchMemSyncDomain_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUlaunchMemSyncDomain_enum::CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT => { + writer + 
.write_all(stringify!(CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT).as_bytes()) + } + &cuda_types::CUlaunchMemSyncDomain_enum::CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE => { + writer.write_all(stringify!(CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUlaunchMemSyncDomainMap_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(default_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.default_, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(remote), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.remote, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUlaunchAttributeID_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_IGNORE => { + writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_IGNORE).as_bytes()) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW).as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_COOPERATIVE => { + writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_COOPERATIVE).as_bytes()) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY).as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION).as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE => { + writer + .write_all( + stringify!( + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + ) + .as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION) + .as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT).as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_PRIORITY => { + writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_PRIORITY).as_bytes()) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP).as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN).as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_LAUNCH_COMPLETION_EVENT) + .as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE => { + writer + .write_all( + stringify!(CU_LAUNCH_ATTRIBUTE_DEVICE_UPDATABLE_KERNEL_NODE) + .as_bytes(), + ) + } + &cuda_types::CUlaunchAttributeID_enum::CU_LAUNCH_ATTRIBUTE_MAX => { + 
writer.write_all(stringify!(CU_LAUNCH_ATTRIBUTE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(x), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.x, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(y), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.y, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(z), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.z, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_2 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.event, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer + .write_all(concat!(", ", stringify!(triggerAtBlockStart), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.triggerAtBlockStart, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_3 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.event, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUlaunchAttributeValue_union__bindgen_ty_4 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(deviceUpdatable), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.deviceUpdatable, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(devNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.devNode, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureStatus_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_NONE => { + writer.write_all(stringify!(CU_STREAM_CAPTURE_STATUS_NONE).as_bytes()) + } + &cuda_types::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_ACTIVE => { + writer.write_all(stringify!(CU_STREAM_CAPTURE_STATUS_ACTIVE).as_bytes()) + } + &cuda_types::CUstreamCaptureStatus_enum::CU_STREAM_CAPTURE_STATUS_INVALIDATED => { + writer + .write_all( + stringify!(CU_STREAM_CAPTURE_STATUS_INVALIDATED).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUstreamCaptureMode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + 
?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_GLOBAL => { + writer.write_all(stringify!(CU_STREAM_CAPTURE_MODE_GLOBAL).as_bytes()) + } + &cuda_types::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_THREAD_LOCAL => { + writer + .write_all( + stringify!(CU_STREAM_CAPTURE_MODE_THREAD_LOCAL).as_bytes(), + ) + } + &cuda_types::CUstreamCaptureMode_enum::CU_STREAM_CAPTURE_MODE_RELAXED => { + writer.write_all(stringify!(CU_STREAM_CAPTURE_MODE_RELAXED).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddress_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_DEFAULT => { + writer.write_all(stringify!(CU_GET_PROC_ADDRESS_DEFAULT).as_bytes()) + } + &cuda_types::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_LEGACY_STREAM => { + writer + .write_all(stringify!(CU_GET_PROC_ADDRESS_LEGACY_STREAM).as_bytes()) + } + &cuda_types::CUdriverProcAddress_flags_enum::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM => { + writer + .write_all( + stringify!(CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUdriverProcAddressQueryResult_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_SUCCESS => { + writer.write_all(stringify!(CU_GET_PROC_ADDRESS_SUCCESS).as_bytes()) + } + &cuda_types::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND => { + writer + .write_all( + stringify!(CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND).as_bytes(), + ) + } + &cuda_types::CUdriverProcAddressQueryResult_enum::CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT => { + writer + .write_all( + stringify!(CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUexecAffinityType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUexecAffinityType_enum::CU_EXEC_AFFINITY_TYPE_SM_COUNT => { + writer.write_all(stringify!(CU_EXEC_AFFINITY_TYPE_SM_COUNT).as_bytes()) + } + &cuda_types::CUexecAffinityType_enum::CU_EXEC_AFFINITY_TYPE_MAX => { + writer.write_all(stringify!(CU_EXEC_AFFINITY_TYPE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUexecAffinitySmCount_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(val), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.val, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUlibraryOption_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUlibraryOption_enum::CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE => { + writer + .write_all( + 
stringify!(CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE) + .as_bytes(), + ) + } + &cuda_types::CUlibraryOption_enum::CU_LIBRARY_BINARY_IS_PRESERVED => { + writer.write_all(stringify!(CU_LIBRARY_BINARY_IS_PRESERVED).as_bytes()) + } + &cuda_types::CUlibraryOption_enum::CU_LIBRARY_NUM_OPTIONS => { + writer.write_all(stringify!(CU_LIBRARY_NUM_OPTIONS).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUlibraryHostUniversalFunctionAndDataTable_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(functionTable), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.functionTable, "", 0, writer)?; + writer + .write_all(concat!(", ", stringify!(functionWindowSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.functionWindowSize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dataTable), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dataTable, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dataWindowSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dataWindowSize, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUdevice_P2PAttribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK => { + writer + .write_all( + stringify!(CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK).as_bytes(), + ) + } + &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED).as_bytes(), + ) + } + &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED) + .as_bytes(), + ) + } + &cuda_types::CUdevice_P2PAttribute_enum::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED => { + writer + .write_all( + stringify!(CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUstreamCallback { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!( + writer, + "{:p}", + unsafe { + std::mem::transmute::< + cuda_types::CUstreamCallback, + *mut ::std::ffi::c_void, + >(*self) + }, + ) + } +} +impl crate::format::CudaDisplay for cuda_types::CUoccupancyB2DSize { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!( + writer, + "{:p}", + unsafe { + std::mem::transmute::< + cuda_types::CUoccupancyB2DSize, + *mut ::std::ffi::c_void, + >(*self) + }, + ) + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY2D_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", 
stringify!(srcXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.WidthInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(srcXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcLOD), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcLOD, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcDevice, "", 0, writer)?; + 
writer.write_all(concat!(", ", stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstLOD), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstLOD, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.WidthInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Depth, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_PEER_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(srcXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcLOD), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcLOD, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcDevice, "", 0, writer)?; + 
writer.write_all(concat!(", ", stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcContext, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstLOD), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstLOD, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstContext, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.WidthInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Depth, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(copyCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.copyCtx, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(copyParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.copyParams, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_DESCRIPTOR_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ 
", stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Format), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(NumChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.NumChannels, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY3D_DESCRIPTOR_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Depth, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Format), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(NumChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.NumChannels, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(tileExtent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.tileExtent, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(miptailFirstLevel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.miptailFirstLevel, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(miptailSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.miptailSize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES_st__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.depth, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.size, "", 
0, writer)?; + writer.write_all(concat!(", ", stringify!(alignment), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.alignment, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hArray, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(hMipmappedArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hMipmappedArray, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.devPtr, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(format), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numChannels, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(sizeInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.sizeInBytes, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.devPtr, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(format), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numChannels, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(pitchInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.pitchInBytes, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_TEXTURE_DESC_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(addressMode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.addressMode, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(filterMode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.filterMode, "", 0, writer)?; + 
writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(maxAnisotropy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.maxAnisotropy, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(mipmapFilterMode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.mipmapFilterMode, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(mipmapLevelBias), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.mipmapLevelBias, "", 0, writer)?; + writer + .write_all(concat!(", ", stringify!(minMipmapLevelClamp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.minMipmapLevelClamp, "", 0, writer)?; + writer + .write_all(concat!(", ", stringify!(maxMipmapLevelClamp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.maxMipmapLevelClamp, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(borderColor), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.borderColor, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUresourceViewFormat_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_NONE => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_NONE).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X8 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_1X8).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X8 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_2X8).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X8 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_4X8).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X8 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_1X8).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X8 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_2X8).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X8 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_4X8).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_1X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_2X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_4X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_1X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_2X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_4X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_1X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_1X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_2X32 => { + 
writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_2X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UINT_4X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UINT_4X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_1X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_1X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_2X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_2X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SINT_4X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SINT_4X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_1X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_1X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_2X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_2X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_4X16 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_4X16).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_1X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_1X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_2X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_2X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_FLOAT_4X32 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_FLOAT_4X32).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC1 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC1).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC2 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC2).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC3 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC3).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC4 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC4).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC4 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SIGNED_BC4).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC5 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC5).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC5 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SIGNED_BC5).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC6H => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC6H).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_SIGNED_BC6H => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_SIGNED_BC6H).as_bytes()) + } + &cuda_types::CUresourceViewFormat_enum::CU_RES_VIEW_FORMAT_UNSIGNED_BC7 => { + writer.write_all(stringify!(CU_RES_VIEW_FORMAT_UNSIGNED_BC7).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_RESOURCE_VIEW_DESC_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(format), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&self.format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.depth, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(firstMipmapLevel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.firstMipmapLevel, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(lastMipmapLevel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.lastMipmapLevel, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(firstLayer), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.firstLayer, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(lastLayer), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.lastLayer, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUtensorMap_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(opaque), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.opaque, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUtensorMapDataType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT8 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT8).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT16 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT16).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT32 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT32).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_INT32 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_INT32).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_UINT64 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_UINT64).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_INT64 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_INT64).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT16 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT16).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT32 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT32).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT64 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT64).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_BFLOAT16 => { + writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_BFLOAT16).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ => { + writer + .write_all( + stringify!(CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ).as_bytes(), + ) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_TFLOAT32 => { + 
writer.write_all(stringify!(CU_TENSOR_MAP_DATA_TYPE_TFLOAT32).as_bytes()) + } + &cuda_types::CUtensorMapDataType_enum::CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ => { + writer + .write_all( + stringify!(CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUtensorMapInterleave_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_NONE => { + writer.write_all(stringify!(CU_TENSOR_MAP_INTERLEAVE_NONE).as_bytes()) + } + &cuda_types::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_16B => { + writer.write_all(stringify!(CU_TENSOR_MAP_INTERLEAVE_16B).as_bytes()) + } + &cuda_types::CUtensorMapInterleave_enum::CU_TENSOR_MAP_INTERLEAVE_32B => { + writer.write_all(stringify!(CU_TENSOR_MAP_INTERLEAVE_32B).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUtensorMapSwizzle_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_NONE => { + writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_NONE).as_bytes()) + } + &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_32B => { + writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_32B).as_bytes()) + } + &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_64B => { + writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_64B).as_bytes()) + } + &cuda_types::CUtensorMapSwizzle_enum::CU_TENSOR_MAP_SWIZZLE_128B => { + writer.write_all(stringify!(CU_TENSOR_MAP_SWIZZLE_128B).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUtensorMapL2promotion_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_NONE => { + writer.write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_NONE).as_bytes()) + } + &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_64B => { + writer + .write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_L2_64B).as_bytes()) + } + &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_128B => { + writer + .write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_L2_128B).as_bytes()) + } + &cuda_types::CUtensorMapL2promotion_enum::CU_TENSOR_MAP_L2_PROMOTION_L2_256B => { + writer + .write_all(stringify!(CU_TENSOR_MAP_L2_PROMOTION_L2_256B).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUtensorMapFloatOOBfill_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUtensorMapFloatOOBfill_enum::CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE => { + writer + .write_all(stringify!(CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE).as_bytes()) + } + &cuda_types::CUtensorMapFloatOOBfill_enum::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA => { + writer + .write_all( + stringify!(CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for 
cuda_types::CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(p2pToken), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.p2pToken, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(vaSpaceToken), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.vaSpaceToken, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE).as_bytes(), + ) + } + &cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ).as_bytes(), + ) + } + &cuda_types::CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum::CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE => { + writer + .write_all( + stringify!(CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_LAUNCH_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(function), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.function, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.gridDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimX, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.blockDimZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.sharedMemBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.hStream, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.kernelParams, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUexternalMemoryHandleType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD).as_bytes(), + ) + } + 
&cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32) + .as_bytes(), + ) + } + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT) + .as_bytes(), + ) + } + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP).as_bytes(), + ) + } + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE) + .as_bytes(), + ) + } + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE) + .as_bytes(), + ) + } + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT) + .as_bytes(), + ) + } + &cuda_types::CUexternalMemoryHandleType_enum::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF => { + writer + .write_all( + stringify!(CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.handle, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.name, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.offset, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.size, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.offset, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(arrayDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.arrayDesc, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numLevels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numLevels, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUexternalSemaphoreHandleType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, 
+ writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX => { + writer + .write_all( + stringify!(CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT => { + writer + .write_all( + stringify!( + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + ) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD => { + writer + .write_all( + stringify!( + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD + ) + .as_bytes(), + ) + } + &cuda_types::CUexternalSemaphoreHandleType_enum::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 => { + writer + .write_all( + stringify!( + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + ) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.handle, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.name, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(params), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.params, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 { + fn write( + 
&self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(fence), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.fence, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(nvSciSync), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.nvSciSync, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(keyedMutex), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.keyedMutex, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.value, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(key), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.key, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(params), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.params, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(fence), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.fence, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(nvSciSync), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.nvSciSync, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(keyedMutex), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.keyedMutex, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.value, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_3 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(key), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.key, "", 0, writer)?; + writer.write_all(concat!(", 
", stringify!(timeoutMs), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.timeoutMs, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extSemArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.paramsArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numExtSems, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extSemArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.paramsArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numExtSems, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extSemArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.paramsArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numExtSems, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extSemArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.paramsArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numExtSems, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAllocationHandleType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_NONE => { + writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_NONE).as_bytes()) + } + &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR => { + writer + .write_all( + stringify!(CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR).as_bytes(), + ) + } 
+ &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_WIN32 => { + writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_WIN32).as_bytes()) + } + &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_WIN32_KMT => { + writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_WIN32_KMT).as_bytes()) + } + &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_FABRIC => { + writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_FABRIC).as_bytes()) + } + &cuda_types::CUmemAllocationHandleType_enum::CU_MEM_HANDLE_TYPE_MAX => { + writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAccess_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_NONE => { + writer.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_NONE).as_bytes()) + } + &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_READ => { + writer.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_READ).as_bytes()) + } + &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_READWRITE => { + writer + .write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_READWRITE).as_bytes()) + } + &cuda_types::CUmemAccess_flags_enum::CU_MEM_ACCESS_FLAGS_PROT_MAX => { + writer.write_all(stringify!(CU_MEM_ACCESS_FLAGS_PROT_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemLocationType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_INVALID => { + writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_INVALID).as_bytes()) + } + &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_DEVICE => { + writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_DEVICE).as_bytes()) + } + &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST => { + writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_HOST).as_bytes()) + } + &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST_NUMA => { + writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_HOST_NUMA).as_bytes()) + } + &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT => { + writer + .write_all( + stringify!(CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT).as_bytes(), + ) + } + &cuda_types::CUmemLocationType_enum::CU_MEM_LOCATION_TYPE_MAX => { + writer.write_all(stringify!(CU_MEM_LOCATION_TYPE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAllocationType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_INVALID => { + writer.write_all(stringify!(CU_MEM_ALLOCATION_TYPE_INVALID).as_bytes()) + } + &cuda_types::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_PINNED => { + writer.write_all(stringify!(CU_MEM_ALLOCATION_TYPE_PINNED).as_bytes()) + } + &cuda_types::CUmemAllocationType_enum::CU_MEM_ALLOCATION_TYPE_MAX => { + writer.write_all(stringify!(CU_MEM_ALLOCATION_TYPE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAllocationGranularity_flags_enum { 
+ fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemAllocationGranularity_flags_enum::CU_MEM_ALLOC_GRANULARITY_MINIMUM => { + writer.write_all(stringify!(CU_MEM_ALLOC_GRANULARITY_MINIMUM).as_bytes()) + } + &cuda_types::CUmemAllocationGranularity_flags_enum::CU_MEM_ALLOC_GRANULARITY_RECOMMENDED => { + writer + .write_all( + stringify!(CU_MEM_ALLOC_GRANULARITY_RECOMMENDED).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemRangeHandleType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemRangeHandleType_enum::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD => { + writer + .write_all( + stringify!(CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD).as_bytes(), + ) + } + &cuda_types::CUmemRangeHandleType_enum::CU_MEM_RANGE_HANDLE_TYPE_MAX => { + writer.write_all(stringify!(CU_MEM_RANGE_HANDLE_TYPE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUarraySparseSubresourceType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUarraySparseSubresourceType_enum::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL => { + writer + .write_all( + stringify!(CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL) + .as_bytes(), + ) + } + &cuda_types::CUarraySparseSubresourceType_enum::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL => { + writer + .write_all( + stringify!(CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemOperationType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemOperationType_enum::CU_MEM_OPERATION_TYPE_MAP => { + writer.write_all(stringify!(CU_MEM_OPERATION_TYPE_MAP).as_bytes()) + } + &cuda_types::CUmemOperationType_enum::CU_MEM_OPERATION_TYPE_UNMAP => { + writer.write_all(stringify!(CU_MEM_OPERATION_TYPE_UNMAP).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemHandleType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemHandleType_enum::CU_MEM_HANDLE_TYPE_GENERIC => { + writer.write_all(stringify!(CU_MEM_HANDLE_TYPE_GENERIC).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay +for cuda_types::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(level), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.level, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(layer), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.layer, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(offsetX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.offsetX, "", 0, writer)?; + writer.write_all(concat!(", ", 
stringify!(offsetY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.offsetY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(offsetZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.offsetZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(extentWidth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extentWidth, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(extentHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extentHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(extentDepth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.extentDepth, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay +for cuda_types::CUarrayMapInfo_st__bindgen_ty_2__bindgen_ty_2 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(layer), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.layer, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.offset, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.size, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemLocation_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.type_, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(id), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.id, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAllocationCompType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemAllocationCompType_enum::CU_MEM_ALLOCATION_COMP_NONE => { + writer.write_all(stringify!(CU_MEM_ALLOCATION_COMP_NONE).as_bytes()) + } + &cuda_types::CUmemAllocationCompType_enum::CU_MEM_ALLOCATION_COMP_GENERIC => { + writer.write_all(stringify!(CU_MEM_ALLOCATION_COMP_GENERIC).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAllocationProp_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.type_, "", 0, writer)?; + writer + .write_all( + concat!(", ", stringify!(requestedHandleTypes), ": ").as_bytes(), + )?; + crate::format::CudaDisplay::write(&self.requestedHandleTypes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.location, "", 0, writer)?; + writer + .write_all(concat!(", ", stringify!(win32HandleMetaData), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.win32HandleMetaData, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(allocFlags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.allocFlags, "", 0, writer)?; + writer.write_all(b" }") + } +} 
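Aside, for readers skimming this generated hunk: every impl follows one of two mechanical patterns. Struct impls print "{ field: value, ... }" one field at a time, and newtype-enum impls match the known constants by name, falling back to the raw integer for unrecognized values. The sketch below reproduces that pattern in a self-contained form; DemoAllocType and DemoPoolProps are hypothetical stand-ins (not part of cuda_types), and the CudaDisplay trait here only mirrors the signature visible in the generated code, not the real definition in crate::format.

use std::io::Write;

// Hypothetical stand-in for crate::format::CudaDisplay; same signature as used above.
trait CudaDisplay {
    fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl Write + ?Sized)) -> std::io::Result<()>;
}

// Newtype-style "enum", as bindgen emits with --default-enum-style=newtype.
#[derive(Copy, Clone, PartialEq, Eq)]
struct DemoAllocType(u32);
impl DemoAllocType {
    const DEMO_ALLOC_TYPE_INVALID: DemoAllocType = DemoAllocType(0);
    const DEMO_ALLOC_TYPE_PINNED: DemoAllocType = DemoAllocType(1);
}

impl CudaDisplay for DemoAllocType {
    fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl Write + ?Sized)) -> std::io::Result<()> {
        match self {
            // Known constants print their symbolic name...
            &DemoAllocType::DEMO_ALLOC_TYPE_INVALID => {
                writer.write_all(stringify!(DEMO_ALLOC_TYPE_INVALID).as_bytes())
            }
            &DemoAllocType::DEMO_ALLOC_TYPE_PINNED => {
                writer.write_all(stringify!(DEMO_ALLOC_TYPE_PINNED).as_bytes())
            }
            // ...and anything else falls back to the raw integer value.
            _ => write!(writer, "{}", self.0),
        }
    }
}

impl CudaDisplay for usize {
    fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl Write + ?Sized)) -> std::io::Result<()> {
        write!(writer, "{}", self)
    }
}

struct DemoPoolProps {
    alloc_type: DemoAllocType,
    max_size: usize,
}

impl CudaDisplay for DemoPoolProps {
    fn write(&self, _fn_name: &'static str, _index: usize, writer: &mut (impl Write + ?Sized)) -> std::io::Result<()> {
        // Struct impls emit "{ field: value, ... }", delegating each field to its own impl.
        writer.write_all(concat!("{ ", stringify!(alloc_type), ": ").as_bytes())?;
        CudaDisplay::write(&self.alloc_type, "", 0, writer)?;
        writer.write_all(concat!(", ", stringify!(max_size), ": ").as_bytes())?;
        CudaDisplay::write(&self.max_size, "", 0, writer)?;
        writer.write_all(b" }")
    }
}

fn main() -> std::io::Result<()> {
    let props = DemoPoolProps { alloc_type: DemoAllocType::DEMO_ALLOC_TYPE_PINNED, max_size: 4096 };
    let mut out = Vec::new();
    props.write("", 0, &mut out)?;
    assert_eq!(String::from_utf8(out).unwrap(), "{ alloc_type: DEMO_ALLOC_TYPE_PINNED, max_size: 4096 }");
    Ok(())
}

The write_cu* free functions further down in this file apply the same scheme to whole API calls, printing "(name: value, name: value)" and threading an arg_idx counter plus the function name through each CudaDisplay::write invocation, presumably so formatters can special-case particular functions or arguments.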
+impl crate::format::CudaDisplay for cuda_types::CUmemAllocationProp_st__bindgen_ty_1 { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(compressionType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.compressionType, "", 0, writer)?; + writer + .write_all( + concat!(", ", stringify!(gpuDirectRDMACapable), ": ").as_bytes(), + )?; + crate::format::CudaDisplay::write(&self.gpuDirectRDMACapable, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(usage), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.usage, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUmulticastGranularity_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmulticastGranularity_flags_enum::CU_MULTICAST_GRANULARITY_MINIMUM => { + writer.write_all(stringify!(CU_MULTICAST_GRANULARITY_MINIMUM).as_bytes()) + } + &cuda_types::CUmulticastGranularity_flags_enum::CU_MULTICAST_GRANULARITY_RECOMMENDED => { + writer + .write_all( + stringify!(CU_MULTICAST_GRANULARITY_RECOMMENDED).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUmulticastObjectProp_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(numDevices), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.numDevices, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.size, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(handleTypes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.handleTypes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemAccessDesc_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.location, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResult_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_SUCCESS => { + writer.write_all(stringify!(CU_GRAPH_EXEC_UPDATE_SUCCESS).as_bytes()) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR => { + writer.write_all(stringify!(CU_GRAPH_EXEC_UPDATE_ERROR).as_bytes()) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED => { + writer + .write_all( + stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED) + .as_bytes(), + ) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED => 
{ + writer + .write_all( + stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED) + .as_bytes(), + ) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED => { + writer + .write_all( + stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED) + .as_bytes(), + ) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED => { + writer + .write_all( + stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED) + .as_bytes(), + ) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED => { + writer + .write_all( + stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED).as_bytes(), + ) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE => { + writer + .write_all( + stringify!( + CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE + ) + .as_bytes(), + ) + } + &cuda_types::CUgraphExecUpdateResult_enum::CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED => { + writer + .write_all( + stringify!(CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphExecUpdateResultInfo_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(result), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.result, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(errorNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.errorNode, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(errorFromNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.errorFromNode, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUmemPool_attribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES => { + writer + .write_all( + stringify!(CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES) + .as_bytes(), + ) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC => { + writer + .write_all( + stringify!(CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC).as_bytes(), + ) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES => { + writer + .write_all( + stringify!(CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES) + .as_bytes(), + ) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD => { + writer + .write_all(stringify!(CU_MEMPOOL_ATTR_RELEASE_THRESHOLD).as_bytes()) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT => { + writer + .write_all( + stringify!(CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT).as_bytes(), + ) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH => { + writer + .write_all(stringify!(CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH).as_bytes()) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_USED_MEM_CURRENT => { + writer.write_all(stringify!(CU_MEMPOOL_ATTR_USED_MEM_CURRENT).as_bytes()) + } + &cuda_types::CUmemPool_attribute_enum::CU_MEMPOOL_ATTR_USED_MEM_HIGH => { + writer.write_all(stringify!(CU_MEMPOOL_ATTR_USED_MEM_HIGH).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } 
+} +impl crate::format::CudaDisplay for cuda_types::CUmemPoolProps_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(allocType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.allocType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(handleTypes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.handleTypes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.location, "", 0, writer)?; + writer + .write_all( + concat!(", ", stringify!(win32SecurityAttributes), ": ").as_bytes(), + )?; + crate::format::CudaDisplay::write(&self.win32SecurityAttributes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(maxSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.maxSize, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS_v1_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(poolProps), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.poolProps, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(accessDescs), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.accessDescs, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(accessDescCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.accessDescCount, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.bytesize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dptr, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS_v2_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(poolProps), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.poolProps, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(accessDescs), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.accessDescs, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(accessDescCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.accessDescCount, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.bytesize, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dptr, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEM_FREE_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dptr, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphMem_attribute_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl 
std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT => { + writer + .write_all(stringify!(CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT).as_bytes()) + } + &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH => { + writer.write_all(stringify!(CU_GRAPH_MEM_ATTR_USED_MEM_HIGH).as_bytes()) + } + &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT => { + writer + .write_all( + stringify!(CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT).as_bytes(), + ) + } + &cuda_types::CUgraphMem_attribute_enum::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH => { + writer + .write_all( + stringify!(CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_CHILD_GRAPH_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(graph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.graph, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EVENT_RECORD_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.event, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_EVENT_WAIT_NODE_PARAMS_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.event, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesOptions_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUflushGPUDirectRDMAWritesOptions_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST => { + writer + .write_all( + stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST) + .as_bytes(), + ) + } + &cuda_types::CUflushGPUDirectRDMAWritesOptions_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS => { + writer + .write_all( + stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUGPUDirectRDMAWritesOrdering_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE => { + writer + .write_all( + stringify!(CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE).as_bytes(), + ) + } + &cuda_types::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER => { + writer + .write_all( + stringify!(CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER).as_bytes(), + ) + } + &cuda_types::CUGPUDirectRDMAWritesOrdering_enum::CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES => { + writer + .write_all( + stringify!(CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES) + .as_bytes(), + ) + } + _ => 
write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesScope_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUflushGPUDirectRDMAWritesScope_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER => { + writer + .write_all( + stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER).as_bytes(), + ) + } + &cuda_types::CUflushGPUDirectRDMAWritesScope_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES => { + writer + .write_all( + stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUflushGPUDirectRDMAWritesTarget_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUflushGPUDirectRDMAWritesTarget_enum::CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX => { + writer + .write_all( + stringify!(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphDebugDot_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE => { + writer.write_all(stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE).as_bytes()) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES).as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS).as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS).as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES => { + 
writer.write_all(stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES).as_bytes()) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS) + .as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO).as_bytes(), + ) + } + &cuda_types::CUgraphDebugDot_flags_enum::CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS => { + writer + .write_all( + stringify!(CU_GRAPH_DEBUG_DOT_FLAGS_CONDITIONAL_NODE_PARAMS) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUuserObject_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUuserObject_flags_enum::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC => { + writer + .write_all(stringify!(CU_USER_OBJECT_NO_DESTRUCTOR_SYNC).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUuserObjectRetain_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUuserObjectRetain_flags_enum::CU_GRAPH_USER_OBJECT_MOVE => { + writer.write_all(stringify!(CU_GRAPH_USER_OBJECT_MOVE).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUgraphInstantiate_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH => { + writer + .write_all( + stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH) + .as_bytes(), + ) + } + &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD => { + writer + .write_all(stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD).as_bytes()) + } + &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH => { + writer + .write_all( + stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH).as_bytes(), + ) + } + &cuda_types::CUgraphInstantiate_flags_enum::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY => { + writer + .write_all( + stringify!(CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY) + .as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUdeviceNumaConfig_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUdeviceNumaConfig_enum::CU_DEVICE_NUMA_CONFIG_NONE => { + writer.write_all(stringify!(CU_DEVICE_NUMA_CONFIG_NONE).as_bytes()) + } + &cuda_types::CUdeviceNumaConfig_enum::CU_DEVICE_NUMA_CONFIG_NUMA_NODE => { + 
writer.write_all(stringify!(CU_DEVICE_NUMA_CONFIG_NUMA_NODE).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +pub fn write_cuGetErrorString( + writer: &mut (impl std::io::Write + ?Sized), + error: cuda_types::CUresult, + pStr: *mut *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(error), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&error, "cuGetErrorString", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pStr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pStr, "cuGetErrorString", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGetErrorName( + writer: &mut (impl std::io::Write + ?Sized), + error: cuda_types::CUresult, + pStr: *mut *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(error), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&error, "cuGetErrorName", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pStr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pStr, "cuGetErrorName", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuInit( + writer: &mut (impl std::io::Write + ?Sized), + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuInit", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDriverGetVersion( + writer: &mut (impl std::io::Write + ?Sized), + driverVersion: *mut ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(driverVersion), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &driverVersion, + "cuDriverGetVersion", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGet( + writer: &mut (impl std::io::Write + ?Sized), + device: *mut cuda_types::CUdevice, + ordinal: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuDeviceGet", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ordinal), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ordinal, "cuDeviceGet", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetCount( + writer: &mut (impl std::io::Write + ?Sized), + count: *mut ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuDeviceGetCount", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetName( + writer: &mut (impl std::io::Write + ?Sized), + name: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuDeviceGetName", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(len), ": ").as_bytes())?; 
+ crate::format::CudaDisplay::write(&len, "cuDeviceGetName", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetName", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetUuid( + writer: &mut (impl std::io::Write + ?Sized), + uuid: *mut cuda_types::CUuuid, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(uuid), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uuid, "cuDeviceGetUuid", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetUuid", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetUuid_v2( + writer: &mut (impl std::io::Write + ?Sized), + uuid: *mut cuda_types::CUuuid, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(uuid), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uuid, "cuDeviceGetUuid_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetUuid_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetLuid( + writer: &mut (impl std::io::Write + ?Sized), + luid: *mut ::core::ffi::c_char, + deviceNodeMask: *mut ::core::ffi::c_uint, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(luid), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&luid, "cuDeviceGetLuid", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(deviceNodeMask), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &deviceNodeMask, + "cuDeviceGetLuid", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetLuid", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceTotalMem_v2( + writer: &mut (impl std::io::Write + ?Sized), + bytes: *mut usize, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuDeviceTotalMem_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceTotalMem_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetTexture1DLinearMaxWidth( + writer: &mut (impl std::io::Write + ?Sized), + maxWidthInElements: *mut usize, + format: cuda_types::CUarray_format, + numChannels: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(maxWidthInElements), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &maxWidthInElements, + "cuDeviceGetTexture1DLinearMaxWidth", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(format), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &format, + "cuDeviceGetTexture1DLinearMaxWidth", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numChannels, + "cuDeviceGetTexture1DLinearMaxWidth", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDeviceGetTexture1DLinearMaxWidth", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + pi: *mut ::core::ffi::c_int, + attrib: cuda_types::CUdevice_attribute, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pi), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pi, "cuDeviceGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attrib, "cuDeviceGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetNvSciSyncAttributes( + writer: &mut (impl std::io::Write + ?Sized), + nvSciSyncAttrList: *mut ::core::ffi::c_void, + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(nvSciSyncAttrList), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nvSciSyncAttrList, + "cuDeviceGetNvSciSyncAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDeviceGetNvSciSyncAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuDeviceGetNvSciSyncAttributes", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceSetMemPool( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, + pool: cuda_types::CUmemoryPool, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceSetMemPool", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuDeviceSetMemPool", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetMemPool( + writer: &mut (impl std::io::Write + ?Sized), + pool: *mut cuda_types::CUmemoryPool, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuDeviceGetMemPool", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetMemPool", arg_idx, writer)?; + 
writer.write_all(b")") +} +pub fn write_cuDeviceGetDefaultMemPool( + writer: &mut (impl std::io::Write + ?Sized), + pool_out: *mut cuda_types::CUmemoryPool, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pool_out, + "cuDeviceGetDefaultMemPool", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDeviceGetDefaultMemPool", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetExecAffinitySupport( + writer: &mut (impl std::io::Write + ?Sized), + pi: *mut ::core::ffi::c_int, + type_: cuda_types::CUexecAffinityType, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pi), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pi, + "cuDeviceGetExecAffinitySupport", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &type_, + "cuDeviceGetExecAffinitySupport", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDeviceGetExecAffinitySupport", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuFlushGPUDirectRDMAWrites( + writer: &mut (impl std::io::Write + ?Sized), + target: cuda_types::CUflushGPUDirectRDMAWritesTarget, + scope: cuda_types::CUflushGPUDirectRDMAWritesScope, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(target), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &target, + "cuFlushGPUDirectRDMAWrites", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(scope), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &scope, + "cuFlushGPUDirectRDMAWrites", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetProperties( + writer: &mut (impl std::io::Write + ?Sized), + prop: *mut cuda_types::CUdevprop, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(prop), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&prop, "cuDeviceGetProperties", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetProperties", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceComputeCapability( + writer: &mut (impl std::io::Write + ?Sized), + major: *mut ::core::ffi::c_int, + minor: *mut ::core::ffi::c_int, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(major), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &major, + "cuDeviceComputeCapability", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(minor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &minor, + "cuDeviceComputeCapability", + arg_idx, + writer, + 
)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDeviceComputeCapability", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxRetain( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pctx, + "cuDevicePrimaryCtxRetain", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxRetain", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxRelease_v2( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxRelease_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxSetFlags_v2( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxSetFlags_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuDevicePrimaryCtxSetFlags_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxGetState( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, + flags: *mut ::core::ffi::c_uint, + active: *mut ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxGetState", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuDevicePrimaryCtxGetState", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(active), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &active, + "cuDevicePrimaryCtxGetState", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxReset_v2( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxReset_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCtxCreate_v2( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; 
+ crate::format::CudaDisplay::write(&pctx, "cuCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuCtxCreate_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxCreate_v3( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, + paramsArray: *mut cuda_types::CUexecAffinityParam, + numParams: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuCtxCreate_v3", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramsArray), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..numParams { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write( + unsafe { &*paramsArray.add(i as usize) }, + "cuCtxCreate_v3", + arg_idx, + writer, + )?; + } + writer.write_all(b"]")?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numParams, "cuCtxCreate_v3", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuCtxCreate_v3", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuCtxCreate_v3", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxDestroy_v2( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxDestroy_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxPushCurrent_v2( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxPushCurrent_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxPopCurrent_v2( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuCtxPopCurrent_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxSetCurrent( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxSetCurrent", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetCurrent( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, +) -> 
std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuCtxGetCurrent", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetDevice( + writer: &mut (impl std::io::Write + ?Sized), + device: *mut cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuCtxGetDevice", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetFlags( + writer: &mut (impl std::io::Write + ?Sized), + flags: *mut ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuCtxGetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxSetFlags( + writer: &mut (impl std::io::Write + ?Sized), + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuCtxSetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetId( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, + ctxId: *mut ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxGetId", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ctxId), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctxId, "cuCtxGetId", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxSynchronize( + writer: &mut (impl std::io::Write + ?Sized), +) -> std::io::Result<()> { + writer.write_all(b"()") +} +pub fn write_cuCtxSetLimit( + writer: &mut (impl std::io::Write + ?Sized), + limit: cuda_types::CUlimit, + value: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(limit), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&limit, "cuCtxSetLimit", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuCtxSetLimit", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetLimit( + writer: &mut (impl std::io::Write + ?Sized), + pvalue: *mut usize, + limit: cuda_types::CUlimit, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pvalue), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pvalue, "cuCtxGetLimit", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(limit), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&limit, "cuCtxGetLimit", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetCacheConfig( + writer: &mut (impl std::io::Write + ?Sized), + pconfig: *mut cuda_types::CUfunc_cache, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pconfig), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pconfig, 
"cuCtxGetCacheConfig", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxSetCacheConfig( + writer: &mut (impl std::io::Write + ?Sized), + config: cuda_types::CUfunc_cache, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&config, "cuCtxSetCacheConfig", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetApiVersion( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, + version: *mut ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxGetApiVersion", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(version), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&version, "cuCtxGetApiVersion", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxGetStreamPriorityRange( + writer: &mut (impl std::io::Write + ?Sized), + leastPriority: *mut ::core::ffi::c_int, + greatestPriority: *mut ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(leastPriority), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &leastPriority, + "cuCtxGetStreamPriorityRange", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(greatestPriority), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &greatestPriority, + "cuCtxGetStreamPriorityRange", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCtxResetPersistingL2Cache( + writer: &mut (impl std::io::Write + ?Sized), +) -> std::io::Result<()> { + writer.write_all(b"()") +} +pub fn write_cuCtxGetExecAffinity( + writer: &mut (impl std::io::Write + ?Sized), + pExecAffinity: *mut cuda_types::CUexecAffinityParam, + type_: cuda_types::CUexecAffinityType, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pExecAffinity), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pExecAffinity, + "cuCtxGetExecAffinity", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuCtxGetExecAffinity", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxAttach( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuCtxAttach", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuCtxAttach", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxDetach( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxDetach", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn 
write_cuCtxGetSharedMemConfig( + writer: &mut (impl std::io::Write + ?Sized), + pConfig: *mut cuda_types::CUsharedconfig, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pConfig), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pConfig, + "cuCtxGetSharedMemConfig", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCtxSetSharedMemConfig( + writer: &mut (impl std::io::Write + ?Sized), + config: cuda_types::CUsharedconfig, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &config, + "cuCtxSetSharedMemConfig", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuModuleLoad( + writer: &mut (impl std::io::Write + ?Sized), + module: *mut cuda_types::CUmodule, + fname: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(module), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&module, "cuModuleLoad", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fname), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&fname, "cuModuleLoad", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleLoadData( + writer: &mut (impl std::io::Write + ?Sized), + module: *mut cuda_types::CUmodule, + image: *const ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(module), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&module, "cuModuleLoadData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(image), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&image, "cuModuleLoadData", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleLoadDataEx( + writer: &mut (impl std::io::Write + ?Sized), + module: *mut cuda_types::CUmodule, + image: *const ::core::ffi::c_void, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(module), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&module, "cuModuleLoadDataEx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(image), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&image, "cuModuleLoadDataEx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numOptions, + "cuModuleLoadDataEx", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuModuleLoadDataEx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &optionValues, + "cuModuleLoadDataEx", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuModuleLoadFatBinary( + writer: &mut (impl std::io::Write + ?Sized), + module: *mut cuda_types::CUmodule, + fatCubin: *const ::core::ffi::c_void, 
+) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(module), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &module, + "cuModuleLoadFatBinary", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fatCubin), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &fatCubin, + "cuModuleLoadFatBinary", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuModuleUnload( + writer: &mut (impl std::io::Write + ?Sized), + hmod: cuda_types::CUmodule, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuModuleUnload", arg_idx, writer)?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUmoduleLoadingMode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUmoduleLoadingMode_enum::CU_MODULE_EAGER_LOADING => { + writer.write_all(stringify!(CU_MODULE_EAGER_LOADING).as_bytes()) + } + &cuda_types::CUmoduleLoadingMode_enum::CU_MODULE_LAZY_LOADING => { + writer.write_all(stringify!(CU_MODULE_LAZY_LOADING).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +pub fn write_cuModuleGetLoadingMode( + writer: &mut (impl std::io::Write + ?Sized), + mode: *mut cuda_types::CUmoduleLoadingMode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&mode, "cuModuleGetLoadingMode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleGetFunction( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: *mut cuda_types::CUfunction, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuModuleGetFunction", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuModuleGetFunction", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuModuleGetFunction", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleGetFunctionCount( + writer: &mut (impl std::io::Write + ?Sized), + count: *mut ::core::ffi::c_uint, + mod_: cuda_types::CUmodule, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuModuleGetFunctionCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mod_), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mod_, + "cuModuleGetFunctionCount", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuModuleEnumerateFunctions( + writer: &mut (impl std::io::Write + ?Sized), + functions: *mut cuda_types::CUfunction, + numFunctions: ::core::ffi::c_uint, + mod_: cuda_types::CUmodule, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(functions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &functions, + "cuModuleEnumerateFunctions", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numFunctions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numFunctions, + "cuModuleEnumerateFunctions", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mod_), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mod_, + "cuModuleEnumerateFunctions", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuModuleGetGlobal_v2( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytes: *mut usize, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuModuleGetGlobal_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuModuleGetGlobal_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuModuleGetGlobal_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuModuleGetGlobal_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLinkCreate_v2( + writer: &mut (impl std::io::Write + ?Sized), + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + stateOut: *mut cuda_types::CUlinkState, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numOptions, "cuLinkCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuLinkCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &optionValues, + "cuLinkCreate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stateOut), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stateOut, "cuLinkCreate_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLinkAddData_v2( + writer: &mut (impl std::io::Write + ?Sized), + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + data: *mut ::core::ffi::c_void, + size: usize, + name: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": 
").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&data, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numOptions, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuLinkAddData_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &optionValues, + "cuLinkAddData_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLinkAddFile_v2( + writer: &mut (impl std::io::Write + ?Sized), + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + path: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuLinkAddFile_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuLinkAddFile_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(path), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&path, "cuLinkAddFile_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numOptions, "cuLinkAddFile_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuLinkAddFile_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &optionValues, + "cuLinkAddFile_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLinkComplete( + writer: &mut (impl std::io::Write + ?Sized), + state: cuda_types::CUlinkState, + cubinOut: *mut *mut ::core::ffi::c_void, + sizeOut: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuLinkComplete", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(cubinOut), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&cubinOut, "cuLinkComplete", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(sizeOut), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&sizeOut, "cuLinkComplete", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLinkDestroy( + writer: &mut (impl std::io::Write + ?Sized), + state: cuda_types::CUlinkState, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuLinkDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleGetTexRef( + writer: &mut (impl std::io::Write + ?Sized), + pTexRef: *mut cuda_types::CUtexref, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pTexRef, "cuModuleGetTexRef", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuModuleGetTexRef", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuModuleGetTexRef", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleGetSurfRef( + writer: &mut (impl std::io::Write + ?Sized), + pSurfRef: *mut cuda_types::CUsurfref, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pSurfRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pSurfRef, "cuModuleGetSurfRef", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuModuleGetSurfRef", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuModuleGetSurfRef", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryLoadData( + writer: &mut (impl std::io::Write + ?Sized), + library: *mut cuda_types::CUlibrary, + code: *const ::core::ffi::c_void, + jitOptions: *mut cuda_types::CUjit_option, + jitOptionsValues: *mut *mut ::core::ffi::c_void, + numJitOptions: ::core::ffi::c_uint, + libraryOptions: *mut cuda_types::CUlibraryOption, + libraryOptionValues: *mut *mut ::core::ffi::c_void, + numLibraryOptions: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&library, "cuLibraryLoadData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(code), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&code, "cuLibraryLoadData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(jitOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &jitOptions, + "cuLibraryLoadData", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(jitOptionsValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &jitOptionsValues, + "cuLibraryLoadData", + arg_idx, + writer, + )?; + arg_idx += 1; + 
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numJitOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numJitOptions, + "cuLibraryLoadData", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(libraryOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &libraryOptions, + "cuLibraryLoadData", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(libraryOptionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &libraryOptionValues, + "cuLibraryLoadData", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numLibraryOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numLibraryOptions, + "cuLibraryLoadData", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLibraryLoadFromFile( + writer: &mut (impl std::io::Write + ?Sized), + library: *mut cuda_types::CUlibrary, + fileName: *const ::core::ffi::c_char, + jitOptions: *mut cuda_types::CUjit_option, + jitOptionsValues: *mut *mut ::core::ffi::c_void, + numJitOptions: ::core::ffi::c_uint, + libraryOptions: *mut cuda_types::CUlibraryOption, + libraryOptionValues: *mut *mut ::core::ffi::c_void, + numLibraryOptions: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &library, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fileName), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &fileName, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(jitOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &jitOptions, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(jitOptionsValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &jitOptionsValues, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numJitOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numJitOptions, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(libraryOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &libraryOptions, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(libraryOptionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &libraryOptionValues, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numLibraryOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numLibraryOptions, + "cuLibraryLoadFromFile", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLibraryUnload( + writer: &mut (impl std::io::Write + ?Sized), + library: cuda_types::CUlibrary, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&library, "cuLibraryUnload", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryGetKernel( + writer: &mut (impl std::io::Write + ?Sized), + pKernel: *mut cuda_types::CUkernel, + library: cuda_types::CUlibrary, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pKernel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pKernel, "cuLibraryGetKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&library, "cuLibraryGetKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuLibraryGetKernel", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryGetKernelCount( + writer: &mut (impl std::io::Write + ?Sized), + count: *mut ::core::ffi::c_uint, + lib: cuda_types::CUlibrary, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuLibraryGetKernelCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(lib), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&lib, "cuLibraryGetKernelCount", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryEnumerateKernels( + writer: &mut (impl std::io::Write + ?Sized), + kernels: *mut cuda_types::CUkernel, + numKernels: ::core::ffi::c_uint, + lib: cuda_types::CUlibrary, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(kernels), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernels, + "cuLibraryEnumerateKernels", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numKernels), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numKernels, + "cuLibraryEnumerateKernels", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(lib), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &lib, + "cuLibraryEnumerateKernels", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLibraryGetModule( + writer: &mut (impl std::io::Write + ?Sized), + pMod: *mut cuda_types::CUmodule, + library: cuda_types::CUlibrary, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pMod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pMod, "cuLibraryGetModule", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&library, "cuLibraryGetModule", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuKernelGetFunction( + writer: &mut (impl std::io::Write + ?Sized), + pFunc: *mut cuda_types::CUfunction, + kernel: cuda_types::CUkernel, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pFunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pFunc, "cuKernelGetFunction", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(kernel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&kernel, "cuKernelGetFunction", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryGetGlobal( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytes: *mut usize, + library: cuda_types::CUlibrary, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuLibraryGetGlobal", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuLibraryGetGlobal", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&library, "cuLibraryGetGlobal", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuLibraryGetGlobal", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryGetManaged( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytes: *mut usize, + library: cuda_types::CUlibrary, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuLibraryGetManaged", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuLibraryGetManaged", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&library, "cuLibraryGetManaged", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuLibraryGetManaged", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLibraryGetUnifiedFunction( + writer: &mut (impl std::io::Write + ?Sized), + fptr: *mut *mut ::core::ffi::c_void, + library: cuda_types::CUlibrary, + symbol: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(fptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &fptr, + "cuLibraryGetUnifiedFunction", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(library), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &library, + "cuLibraryGetUnifiedFunction", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(symbol), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &symbol, + "cuLibraryGetUnifiedFunction", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuKernelGetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + pi: *mut ::core::ffi::c_int, + attrib: cuda_types::CUfunction_attribute, + kernel: cuda_types::CUkernel, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(pi), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pi, "cuKernelGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attrib, "cuKernelGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&kernel, "cuKernelGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuKernelGetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuKernelSetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + attrib: cuda_types::CUfunction_attribute, + val: ::core::ffi::c_int, + kernel: cuda_types::CUkernel, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attrib, "cuKernelSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(val), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&val, "cuKernelSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&kernel, "cuKernelSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuKernelSetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuKernelSetCacheConfig( + writer: &mut (impl std::io::Write + ?Sized), + kernel: cuda_types::CUkernel, + config: cuda_types::CUfunc_cache, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(kernel), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernel, + "cuKernelSetCacheConfig", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &config, + "cuKernelSetCacheConfig", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuKernelSetCacheConfig", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuKernelGetName( + writer: &mut (impl std::io::Write + ?Sized), + name: *mut *const ::core::ffi::c_char, + hfunc: cuda_types::CUkernel, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuKernelGetName", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuKernelGetName", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuKernelGetParamInfo( + writer: &mut (impl std::io::Write + ?Sized), + kernel: cuda_types::CUkernel, + paramIndex: usize, + paramOffset: *mut usize, + paramSize: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(kernel), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&kernel, "cuKernelGetParamInfo", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramIndex), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + ¶mIndex, + "cuKernelGetParamInfo", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + ¶mOffset, + "cuKernelGetParamInfo", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + ¶mSize, + "cuKernelGetParamInfo", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemGetInfo_v2( + writer: &mut (impl std::io::Write + ?Sized), + free: *mut usize, + total: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(free), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&free, "cuMemGetInfo_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(total), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&total, "cuMemGetInfo_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAlloc_v2( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAlloc_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemAlloc_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocPitch_v2( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + pPitch: *mut usize, + WidthInBytes: usize, + Height: usize, + ElementSizeBytes: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAllocPitch_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pPitch, "cuMemAllocPitch_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &WidthInBytes, + "cuMemAllocPitch_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemAllocPitch_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ElementSizeBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ElementSizeBytes, + "cuMemAllocPitch_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemFree_v2( + writer: &mut (impl std::io::Write + ?Sized), + dptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemFree_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemGetAddressRange_v2( + writer: &mut (impl std::io::Write + ?Sized), + pbase: *mut cuda_types::CUdeviceptr, + psize: *mut usize, + dptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pbase), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pbase, + "cuMemGetAddressRange_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(psize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &psize, + "cuMemGetAddressRange_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuMemGetAddressRange_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemAllocHost_v2( + writer: &mut (impl std::io::Write + ?Sized), + pp: *mut *mut ::core::ffi::c_void, + bytesize: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pp, "cuMemAllocHost_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemAllocHost_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemFreeHost( + writer: &mut (impl std::io::Write + ?Sized), + p: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&p, "cuMemFreeHost", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemHostAlloc( + writer: &mut (impl std::io::Write + ?Sized), + pp: *mut *mut ::core::ffi::c_void, + bytesize: usize, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pp, "cuMemHostAlloc", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemHostAlloc", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuMemHostAlloc", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemHostGetDevicePointer_v2( + writer: &mut (impl std::io::Write + ?Sized), + pdptr: *mut cuda_types::CUdeviceptr, + p: *mut ::core::ffi::c_void, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pdptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pdptr, + "cuMemHostGetDevicePointer_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &p, + "cuMemHostGetDevicePointer_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuMemHostGetDevicePointer_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemHostGetFlags( + writer: &mut (impl std::io::Write + ?Sized), + pFlags: *mut ::core::ffi::c_uint, + p: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pFlags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pFlags, "cuMemHostGetFlags", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&p, "cuMemHostGetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocManaged( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAllocManaged", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemAllocManaged", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemAllocManaged", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceRegisterAsyncNotification( + writer: &mut (impl std::io::Write + ?Sized), + device: cuda_types::CUdevice, + callbackFunc: cuda_types::CUasyncCallback, + userData: *mut ::core::ffi::c_void, + callback: *mut cuda_types::CUasyncCallbackHandle, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuDeviceRegisterAsyncNotification", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(callbackFunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &callbackFunc, + "cuDeviceRegisterAsyncNotification", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &userData, + "cuDeviceRegisterAsyncNotification", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(callback), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &callback, + "cuDeviceRegisterAsyncNotification", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceUnregisterAsyncNotification( + writer: &mut (impl std::io::Write + ?Sized), + device: cuda_types::CUdevice, + callback: cuda_types::CUasyncCallbackHandle, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuDeviceUnregisterAsyncNotification", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(callback), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &callback, + "cuDeviceUnregisterAsyncNotification", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn 
write_cuDeviceGetByPCIBusId( + writer: &mut (impl std::io::Write + ?Sized), + dev: *mut cuda_types::CUdevice, + pciBusId: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetByPCIBusId", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pciBusId), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pciBusId, + "cuDeviceGetByPCIBusId", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetPCIBusId( + writer: &mut (impl std::io::Write + ?Sized), + pciBusId: *mut ::core::ffi::c_char, + len: ::core::ffi::c_int, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pciBusId), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pciBusId, + "cuDeviceGetPCIBusId", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(len), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&len, "cuDeviceGetPCIBusId", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceGetPCIBusId", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuIpcGetEventHandle( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUipcEventHandle, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pHandle, "cuIpcGetEventHandle", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&event, "cuIpcGetEventHandle", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuIpcOpenEventHandle( + writer: &mut (impl std::io::Write + ?Sized), + phEvent: *mut cuda_types::CUevent, + handle: cuda_types::CUipcEventHandle, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phEvent, + "cuIpcOpenEventHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&handle, "cuIpcOpenEventHandle", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuIpcGetMemHandle( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUipcMemHandle, + dptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pHandle, "cuIpcGetMemHandle", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuIpcGetMemHandle", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuIpcOpenMemHandle_v2( + writer: &mut (impl std::io::Write + ?Sized), + pdptr: *mut cuda_types::CUdeviceptr, + handle: cuda_types::CUipcMemHandle, + Flags: ::core::ffi::c_uint, +) -> 
std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pdptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pdptr, "cuIpcOpenMemHandle_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuIpcOpenMemHandle_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuIpcOpenMemHandle_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuIpcCloseMemHandle( + writer: &mut (impl std::io::Write + ?Sized), + dptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuIpcCloseMemHandle", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemHostRegister_v2( + writer: &mut (impl std::io::Write + ?Sized), + p: *mut ::core::ffi::c_void, + bytesize: usize, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&p, "cuMemHostRegister_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bytesize, + "cuMemHostRegister_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuMemHostRegister_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemHostUnregister( + writer: &mut (impl std::io::Write + ?Sized), + p: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&p, "cuMemHostUnregister", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dst, "cuMemcpy_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(src), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&src, "cuMemcpy_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpy_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyPeer_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyPeer_ptds", arg_idx, writer)?; + arg_idx += 1; + 
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstContext, + "cuMemcpyPeer_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyPeer_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcContext, + "cuMemcpyPeer_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyPeer_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoD_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyHtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcHost, + "cuMemcpyHtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyHtoD_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoH_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstHost, + "cuMemcpyDtoH_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoH_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoH_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoD_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyDtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoD_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn 
write_cuMemcpyDtoA_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstArray, + "cuMemcpyDtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstOffset, + "cuMemcpyDtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoA_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoD_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyAtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcArray, + "cuMemcpyAtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcOffset, + "cuMemcpyAtoD_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyAtoD_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoA_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstArray, + "cuMemcpyHtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstOffset, + "cuMemcpyHtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcHost, + "cuMemcpyHtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyHtoA_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoH_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, 
+ ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstHost, + "cuMemcpyAtoH_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcArray, + "cuMemcpyAtoH_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcOffset, + "cuMemcpyAtoH_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyAtoH_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoA_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstArray, + "cuMemcpyAtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstOffset, + "cuMemcpyAtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcArray, + "cuMemcpyAtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcOffset, + "cuMemcpyAtoA_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyAtoA_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2D_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy2D_v2_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2DUnaligned_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCopy, + "cuMemcpy2DUnaligned_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3D_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3D_v2_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DPeer_ptds( + writer: &mut 
(impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3DPeer_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dst, "cuMemcpyAsync_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(src), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&src, "cuMemcpyAsync_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyAsync_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyPeerAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyPeerAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstContext, + "cuMemcpyPeerAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyPeerAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcContext, + "cuMemcpyPeerAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyPeerAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyPeerAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoDAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyHtoDAsync_v2_ptsz", + arg_idx, + writer, 
+ )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcHost, + "cuMemcpyHtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyHtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyHtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoHAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstHost, + "cuMemcpyDtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyDtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoDAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyDtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyDtoDAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoAAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstArray, + "cuMemcpyHtoAAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + 
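// Between consecutive arguments the generated writer emits a ", " separator and
// advances arg_idx, so CudaDisplay::write always receives the zero-based position
// of the argument it is currently formatting within the named CUDA call.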
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstOffset, + "cuMemcpyHtoAAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcHost, + "cuMemcpyHtoAAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyHtoAAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyHtoAAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoHAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstHost, + "cuMemcpyAtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcArray, + "cuMemcpyAtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcOffset, + "cuMemcpyAtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyAtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyAtoHAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2DAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCopy, + "cuMemcpy2DAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpy2DAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCopy, + "cuMemcpy3DAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + 
"cuMemcpy3DAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DPeerAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCopy, + "cuMemcpy3DPeerAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpy3DPeerAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD8_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD8_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD8_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD8_v2_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD16_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD16_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD16_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD16_v2_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD32_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD32_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD32_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD32_v2_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D8_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D8_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstPitch, + "cuMemsetD2D8_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD2D8_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D8_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D8_v2_ptds", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D16_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D16_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstPitch, + "cuMemsetD2D16_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD2D16_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D16_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Height, + "cuMemsetD2D16_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D32_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D32_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstPitch, + "cuMemsetD2D32_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD2D32_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D32_v2_ptds", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Height, + "cuMemsetD2D32_v2_ptds", + arg_idx, + writer, + )?; + 
writer.write_all(b")") +} +pub fn write_cuMemsetD8Async_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD8Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD8Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD8Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemsetD8Async_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD16Async_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD16Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD16Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD16Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemsetD16Async_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD32Async_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD32Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD32Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD32Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemsetD32Async_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D8Async_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D8Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstPitch, + "cuMemsetD2D8Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD2D8Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Width, + "cuMemsetD2D8Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Height, + "cuMemsetD2D8Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemsetD2D8Async_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D16Async_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D16Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstPitch, + "cuMemsetD2D16Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD2D16Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Width, + "cuMemsetD2D16Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Height, + "cuMemsetD2D16Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemsetD2D16Async_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D32Async_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D32Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstPitch, + "cuMemsetD2D32Async_ptsz", 
+ arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD2D32Async_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Width, + "cuMemsetD2D32Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Height, + "cuMemsetD2D32Async_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemsetD2D32Async_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArrayCreate_v2( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pHandle, "cuArrayCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pAllocateArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pAllocateArray, + "cuArrayCreate_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArrayGetDescriptor_v2( + writer: &mut (impl std::io::Write + ?Sized), + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR, + hArray: cuda_types::CUarray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pArrayDescriptor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pArrayDescriptor, + "cuArrayGetDescriptor_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hArray, + "cuArrayGetDescriptor_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArrayGetSparseProperties( + writer: &mut (impl std::io::Write + ?Sized), + sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES, + array: cuda_types::CUarray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(sparseProperties), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &sparseProperties, + "cuArrayGetSparseProperties", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(array), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &array, + "cuArrayGetSparseProperties", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMipmappedArrayGetSparseProperties( + writer: &mut (impl std::io::Write + ?Sized), + sparseProperties: *mut cuda_types::CUDA_ARRAY_SPARSE_PROPERTIES, + mipmap: cuda_types::CUmipmappedArray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(sparseProperties), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &sparseProperties, + "cuMipmappedArrayGetSparseProperties", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mipmap), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &mipmap, + "cuMipmappedArrayGetSparseProperties", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArrayGetMemoryRequirements( + writer: &mut (impl std::io::Write + ?Sized), + memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS, + array: cuda_types::CUarray, + device: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(memoryRequirements), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memoryRequirements, + "cuArrayGetMemoryRequirements", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(array), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &array, + "cuArrayGetMemoryRequirements", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuArrayGetMemoryRequirements", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMipmappedArrayGetMemoryRequirements( + writer: &mut (impl std::io::Write + ?Sized), + memoryRequirements: *mut cuda_types::CUDA_ARRAY_MEMORY_REQUIREMENTS, + mipmap: cuda_types::CUmipmappedArray, + device: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(memoryRequirements), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memoryRequirements, + "cuMipmappedArrayGetMemoryRequirements", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mipmap), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mipmap, + "cuMipmappedArrayGetMemoryRequirements", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuMipmappedArrayGetMemoryRequirements", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArrayGetPlane( + writer: &mut (impl std::io::Write + ?Sized), + pPlaneArray: *mut cuda_types::CUarray, + hArray: cuda_types::CUarray, + planeIdx: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pPlaneArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pPlaneArray, "cuArrayGetPlane", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hArray, "cuArrayGetPlane", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(planeIdx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&planeIdx, "cuArrayGetPlane", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuArrayDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hArray: cuda_types::CUarray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hArray, "cuArrayDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuArray3DCreate_v2( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR, +) -> 
std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pHandle, "cuArray3DCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pAllocateArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pAllocateArray, + "cuArray3DCreate_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArray3DGetDescriptor_v2( + writer: &mut (impl std::io::Write + ?Sized), + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR, + hArray: cuda_types::CUarray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pArrayDescriptor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pArrayDescriptor, + "cuArray3DGetDescriptor_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hArray, + "cuArray3DGetDescriptor_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMipmappedArrayCreate( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUmipmappedArray, + pMipmappedArrayDesc: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR, + numMipmapLevels: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pHandle, + "cuMipmappedArrayCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pMipmappedArrayDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pMipmappedArrayDesc, + "cuMipmappedArrayCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numMipmapLevels), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numMipmapLevels, + "cuMipmappedArrayCreate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMipmappedArrayGetLevel( + writer: &mut (impl std::io::Write + ?Sized), + pLevelArray: *mut cuda_types::CUarray, + hMipmappedArray: cuda_types::CUmipmappedArray, + level: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pLevelArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pLevelArray, + "cuMipmappedArrayGetLevel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hMipmappedArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hMipmappedArray, + "cuMipmappedArrayGetLevel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(level), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &level, + "cuMipmappedArrayGetLevel", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMipmappedArrayDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hMipmappedArray: cuda_types::CUmipmappedArray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hMipmappedArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hMipmappedArray, + "cuMipmappedArrayDestroy", + arg_idx, + writer, + )?; + 
writer.write_all(b")") +} +pub fn write_cuMemGetHandleForAddressRange( + writer: &mut (impl std::io::Write + ?Sized), + handle: *mut ::core::ffi::c_void, + dptr: cuda_types::CUdeviceptr, + size: usize, + handleType: cuda_types::CUmemRangeHandleType, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuMemGetHandleForAddressRange", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuMemGetHandleForAddressRange", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuMemGetHandleForAddressRange", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handleType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handleType, + "cuMemGetHandleForAddressRange", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuMemGetHandleForAddressRange", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemAddressReserve( + writer: &mut (impl std::io::Write + ?Sized), + ptr: *mut cuda_types::CUdeviceptr, + size: usize, + alignment: usize, + addr: cuda_types::CUdeviceptr, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemAddressReserve", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMemAddressReserve", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(alignment), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &alignment, + "cuMemAddressReserve", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuMemAddressReserve", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemAddressReserve", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAddressFree( + writer: &mut (impl std::io::Write + ?Sized), + ptr: cuda_types::CUdeviceptr, + size: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemAddressFree", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMemAddressFree", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemCreate( + writer: &mut (impl std::io::Write + ?Sized), + handle: *mut cuda_types::CUmemGenericAllocationHandle, + size: usize, + prop: *const cuda_types::CUmemAllocationProp, + flags: ::core::ffi::c_ulonglong, +) -> 
std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&handle, "cuMemCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMemCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(prop), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&prop, "cuMemCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemRelease( + writer: &mut (impl std::io::Write + ?Sized), + handle: cuda_types::CUmemGenericAllocationHandle, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&handle, "cuMemRelease", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemMap( + writer: &mut (impl std::io::Write + ?Sized), + ptr: cuda_types::CUdeviceptr, + size: usize, + offset: usize, + handle: cuda_types::CUmemGenericAllocationHandle, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemMap", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMemMap", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&offset, "cuMemMap", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&handle, "cuMemMap", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemMap", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemMapArrayAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + mapInfoList: *mut cuda_types::CUarrayMapInfo, + count: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mapInfoList), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..count { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write( + unsafe { &*mapInfoList.add(i as usize) }, + "cuMemMapArrayAsync_ptsz", + arg_idx, + writer, + )?; + } + writer.write_all(b"]")?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuMemMapArrayAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemMapArrayAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemUnmap( + writer: &mut (impl std::io::Write + ?Sized), 
+ ptr: cuda_types::CUdeviceptr, + size: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemUnmap", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMemUnmap", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemSetAccess( + writer: &mut (impl std::io::Write + ?Sized), + ptr: cuda_types::CUdeviceptr, + size: usize, + desc: *const cuda_types::CUmemAccessDesc, + count: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemSetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMemSetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(desc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&desc, "cuMemSetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemSetAccess", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemGetAccess( + writer: &mut (impl std::io::Write + ?Sized), + flags: *mut ::core::ffi::c_ulonglong, + location: *const cuda_types::CUmemLocation, + ptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemGetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&location, "cuMemGetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemGetAccess", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemExportToShareableHandle( + writer: &mut (impl std::io::Write + ?Sized), + shareableHandle: *mut ::core::ffi::c_void, + handle: cuda_types::CUmemGenericAllocationHandle, + handleType: cuda_types::CUmemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(shareableHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &shareableHandle, + "cuMemExportToShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuMemExportToShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handleType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handleType, + "cuMemExportToShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuMemExportToShareableHandle", + arg_idx, + writer, + )?; + 
writer.write_all(b")") +} +pub fn write_cuMemImportFromShareableHandle( + writer: &mut (impl std::io::Write + ?Sized), + handle: *mut cuda_types::CUmemGenericAllocationHandle, + osHandle: *mut ::core::ffi::c_void, + shHandleType: cuda_types::CUmemAllocationHandleType, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuMemImportFromShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(osHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &osHandle, + "cuMemImportFromShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(shHandleType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &shHandleType, + "cuMemImportFromShareableHandle", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemGetAllocationGranularity( + writer: &mut (impl std::io::Write + ?Sized), + granularity: *mut usize, + prop: *const cuda_types::CUmemAllocationProp, + option: cuda_types::CUmemAllocationGranularity_flags, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(granularity), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &granularity, + "cuMemGetAllocationGranularity", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(prop), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &prop, + "cuMemGetAllocationGranularity", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(option), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &option, + "cuMemGetAllocationGranularity", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemGetAllocationPropertiesFromHandle( + writer: &mut (impl std::io::Write + ?Sized), + prop: *mut cuda_types::CUmemAllocationProp, + handle: cuda_types::CUmemGenericAllocationHandle, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(prop), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &prop, + "cuMemGetAllocationPropertiesFromHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuMemGetAllocationPropertiesFromHandle", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemRetainAllocationHandle( + writer: &mut (impl std::io::Write + ?Sized), + handle: *mut cuda_types::CUmemGenericAllocationHandle, + addr: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuMemRetainAllocationHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuMemRetainAllocationHandle", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemFreeAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dptr: cuda_types::CUdeviceptr, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> 
{ + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemFreeAsync_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemFreeAsync_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAllocAsync_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bytesize, + "cuMemAllocAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemAllocAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemPoolTrimTo( + writer: &mut (impl std::io::Write + ?Sized), + pool: cuda_types::CUmemoryPool, + minBytesToKeep: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolTrimTo", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(minBytesToKeep), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &minBytesToKeep, + "cuMemPoolTrimTo", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemPoolSetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + pool: cuda_types::CUmemoryPool, + attr: cuda_types::CUmemPool_attribute, + value: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attr, "cuMemPoolSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuMemPoolSetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPoolGetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + pool: cuda_types::CUmemoryPool, + attr: cuda_types::CUmemPool_attribute, + value: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attr, "cuMemPoolGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, 
"cuMemPoolGetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPoolSetAccess( + writer: &mut (impl std::io::Write + ?Sized), + pool: cuda_types::CUmemoryPool, + map: *const cuda_types::CUmemAccessDesc, + count: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolSetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(map), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&map, "cuMemPoolSetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemPoolSetAccess", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPoolGetAccess( + writer: &mut (impl std::io::Write + ?Sized), + flags: *mut cuda_types::CUmemAccess_flags, + memPool: cuda_types::CUmemoryPool, + location: *mut cuda_types::CUmemLocation, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemPoolGetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memPool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&memPool, "cuMemPoolGetAccess", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&location, "cuMemPoolGetAccess", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPoolCreate( + writer: &mut (impl std::io::Write + ?Sized), + pool: *mut cuda_types::CUmemoryPool, + poolProps: *const cuda_types::CUmemPoolProps, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(poolProps), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&poolProps, "cuMemPoolCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPoolDestroy( + writer: &mut (impl std::io::Write + ?Sized), + pool: cuda_types::CUmemoryPool, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocFromPoolAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + pool: cuda_types::CUmemoryPool, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuMemAllocFromPoolAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bytesize, + "cuMemAllocFromPoolAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pool), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &pool, + "cuMemAllocFromPoolAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemAllocFromPoolAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemPoolExportToShareableHandle( + writer: &mut (impl std::io::Write + ?Sized), + handle_out: *mut ::core::ffi::c_void, + pool: cuda_types::CUmemoryPool, + handleType: cuda_types::CUmemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(handle_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle_out, + "cuMemPoolExportToShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pool, + "cuMemPoolExportToShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handleType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handleType, + "cuMemPoolExportToShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuMemPoolExportToShareableHandle", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemPoolImportFromShareableHandle( + writer: &mut (impl std::io::Write + ?Sized), + pool_out: *mut cuda_types::CUmemoryPool, + handle: *mut ::core::ffi::c_void, + handleType: cuda_types::CUmemAllocationHandleType, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pool_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pool_out, + "cuMemPoolImportFromShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handle, + "cuMemPoolImportFromShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handleType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &handleType, + "cuMemPoolImportFromShareableHandle", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuMemPoolImportFromShareableHandle", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemPoolExportPointer( + writer: &mut (impl std::io::Write + ?Sized), + shareData_out: *mut cuda_types::CUmemPoolPtrExportData, + ptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(shareData_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &shareData_out, + "cuMemPoolExportPointer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuMemPoolExportPointer", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPoolImportPointer( + 
writer: &mut (impl std::io::Write + ?Sized), + ptr_out: *mut cuda_types::CUdeviceptr, + pool: cuda_types::CUmemoryPool, + shareData: *mut cuda_types::CUmemPoolPtrExportData, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ptr_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ptr_out, + "cuMemPoolImportPointer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pool, "cuMemPoolImportPointer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(shareData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &shareData, + "cuMemPoolImportPointer", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMulticastCreate( + writer: &mut (impl std::io::Write + ?Sized), + mcHandle: *mut cuda_types::CUmemGenericAllocationHandle, + prop: *const cuda_types::CUmulticastObjectProp, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mcHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&mcHandle, "cuMulticastCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(prop), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&prop, "cuMulticastCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMulticastAddDevice( + writer: &mut (impl std::io::Write + ?Sized), + mcHandle: cuda_types::CUmemGenericAllocationHandle, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mcHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mcHandle, + "cuMulticastAddDevice", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuMulticastAddDevice", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMulticastBindMem( + writer: &mut (impl std::io::Write + ?Sized), + mcHandle: cuda_types::CUmemGenericAllocationHandle, + mcOffset: usize, + memHandle: cuda_types::CUmemGenericAllocationHandle, + memOffset: usize, + size: usize, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mcHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&mcHandle, "cuMulticastBindMem", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&mcOffset, "cuMulticastBindMem", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memHandle, + "cuMulticastBindMem", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memOffset, + "cuMulticastBindMem", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMulticastBindMem", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMulticastBindMem", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMulticastBindAddr( + writer: &mut (impl std::io::Write + ?Sized), + mcHandle: cuda_types::CUmemGenericAllocationHandle, + mcOffset: usize, + memptr: cuda_types::CUdeviceptr, + size: usize, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mcHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mcHandle, + "cuMulticastBindAddr", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mcOffset, + "cuMulticastBindAddr", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&memptr, "cuMulticastBindAddr", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMulticastBindAddr", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMulticastBindAddr", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMulticastUnbind( + writer: &mut (impl std::io::Write + ?Sized), + mcHandle: cuda_types::CUmemGenericAllocationHandle, + dev: cuda_types::CUdevice, + mcOffset: usize, + size: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mcHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&mcHandle, "cuMulticastUnbind", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuMulticastUnbind", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&mcOffset, "cuMulticastUnbind", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuMulticastUnbind", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMulticastGetGranularity( + writer: &mut (impl std::io::Write + ?Sized), + granularity: *mut usize, + prop: *const cuda_types::CUmulticastObjectProp, + option: cuda_types::CUmulticastGranularity_flags, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(granularity), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &granularity, + "cuMulticastGetGranularity", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(prop), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &prop, + "cuMulticastGetGranularity", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(option), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &option, + "cuMulticastGetGranularity", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuPointerGetAttribute( + writer: &mut (impl 
std::io::Write + ?Sized), + data: *mut ::core::ffi::c_void, + attribute: cuda_types::CUpointer_attribute, + ptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&data, "cuPointerGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attribute), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attribute, + "cuPointerGetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuPointerGetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPrefetchAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: cuda_types::CUdeviceptr, + count: usize, + dstDevice: cuda_types::CUdevice, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &devPtr, + "cuMemPrefetchAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuMemPrefetchAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemPrefetchAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemPrefetchAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemPrefetchAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: cuda_types::CUdeviceptr, + count: usize, + location: cuda_types::CUmemLocation, + flags: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &devPtr, + "cuMemPrefetchAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuMemPrefetchAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &location, + "cuMemPrefetchAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuMemPrefetchAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemPrefetchAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemAdvise( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: cuda_types::CUdeviceptr, + count: usize, + advice: cuda_types::CUmem_advise, + device: cuda_types::CUdevice, +) -> 
std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&devPtr, "cuMemAdvise", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemAdvise", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(advice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&advice, "cuMemAdvise", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuMemAdvise", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAdvise_v2( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: cuda_types::CUdeviceptr, + count: usize, + advice: cuda_types::CUmem_advise, + location: cuda_types::CUmemLocation, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&devPtr, "cuMemAdvise_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemAdvise_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(advice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&advice, "cuMemAdvise_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&location, "cuMemAdvise_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemRangeGetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + data: *mut ::core::ffi::c_void, + dataSize: usize, + attribute: cuda_types::CUmem_range_attribute, + devPtr: cuda_types::CUdeviceptr, + count: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&data, "cuMemRangeGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dataSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dataSize, + "cuMemRangeGetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attribute), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attribute, + "cuMemRangeGetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &devPtr, + "cuMemRangeGetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuMemRangeGetAttribute", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemRangeGetAttributes( + writer: &mut (impl std::io::Write + ?Sized), + data: *mut *mut ::core::ffi::c_void, + dataSizes: *mut usize, + attributes: *mut cuda_types::CUmem_range_attribute, + numAttributes: usize, + devPtr: cuda_types::CUdeviceptr, + count: usize, +) -> std::io::Result<()> { + let mut arg_idx = 
0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &data, + "cuMemRangeGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dataSizes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dataSizes, + "cuMemRangeGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attributes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attributes, + "cuMemRangeGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numAttributes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numAttributes, + "cuMemRangeGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &devPtr, + "cuMemRangeGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuMemRangeGetAttributes", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuPointerSetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + value: *const ::core::ffi::c_void, + attribute: cuda_types::CUpointer_attribute, + ptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuPointerSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attribute), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attribute, + "cuPointerSetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuPointerSetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuPointerGetAttributes( + writer: &mut (impl std::io::Write + ?Sized), + numAttributes: ::core::ffi::c_uint, + attributes: *mut cuda_types::CUpointer_attribute, + data: *mut *mut ::core::ffi::c_void, + ptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(numAttributes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numAttributes, + "cuPointerGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attributes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attributes, + "cuPointerGetAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&data, "cuPointerGetAttributes", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuPointerGetAttributes", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamCreate( + writer: &mut (impl std::io::Write + ?Sized), + phStream: *mut cuda_types::CUstream, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx 
= 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phStream, "cuStreamCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuStreamCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamCreateWithPriority( + writer: &mut (impl std::io::Write + ?Sized), + phStream: *mut cuda_types::CUstream, + flags: ::core::ffi::c_uint, + priority: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phStream, + "cuStreamCreateWithPriority", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamCreateWithPriority", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(priority), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &priority, + "cuStreamCreateWithPriority", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetPriority_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + priority: *mut ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetPriority_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(priority), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &priority, + "cuStreamGetPriority_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetFlags_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + flags: *mut ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetFlags_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamGetFlags_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamGetId_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + streamId: *mut ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamGetId_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(streamId), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&streamId, "cuStreamGetId_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCtx_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + pctx: *mut cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&hStream, "cuStreamGetCtx_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuStreamGetCtx_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitEvent_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + hEvent: cuda_types::CUevent, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamWaitEvent_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hEvent, + "cuStreamWaitEvent_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuStreamWaitEvent_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamAddCallback_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + callback: cuda_types::CUstreamCallback, + userData: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamAddCallback_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(callback), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &callback, + "cuStreamAddCallback_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &userData, + "cuStreamAddCallback_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamAddCallback_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBeginCapture_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + mode: cuda_types::CUstreamCaptureMode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamBeginCapture_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mode, + "cuStreamBeginCapture_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBeginCaptureToGraph_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + mode: cuda_types::CUstreamCaptureMode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + 
&hStream, + "cuStreamBeginCaptureToGraph_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuStreamBeginCaptureToGraph_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuStreamBeginCaptureToGraph_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencyData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencyData, + "cuStreamBeginCaptureToGraph_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuStreamBeginCaptureToGraph_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mode, + "cuStreamBeginCaptureToGraph_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuThreadExchangeStreamCaptureMode( + writer: &mut (impl std::io::Write + ?Sized), + mode: *mut cuda_types::CUstreamCaptureMode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mode, + "cuThreadExchangeStreamCaptureMode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamEndCapture_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + phGraph: *mut cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamEndCapture_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(phGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraph, + "cuStreamEndCapture_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamIsCapturing_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus: *mut cuda_types::CUstreamCaptureStatus, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamIsCapturing_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus, + "cuStreamIsCapturing_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCaptureInfo_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + numDependencies_out: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetCaptureInfo_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus_out, + "cuStreamGetCaptureInfo_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(id_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &id_out, + "cuStreamGetCaptureInfo_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(graph_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &graph_out, + "cuStreamGetCaptureInfo_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies_out, + "cuStreamGetCaptureInfo_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies_out, + "cuStreamGetCaptureInfo_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCaptureInfo_v3_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + edgeData_out: *mut *const cuda_types::CUgraphEdgeData, + numDependencies_out: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus_out, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(id_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &id_out, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(graph_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &graph_out, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies_out, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &edgeData_out, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies_out, + "cuStreamGetCaptureInfo_v3_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamUpdateCaptureDependencies_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + 
hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + numDependencies: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamUpdateCaptureDependencies_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuStreamUpdateCaptureDependencies_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuStreamUpdateCaptureDependencies_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamUpdateCaptureDependencies_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamUpdateCaptureDependencies_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamUpdateCaptureDependencies_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuStreamUpdateCaptureDependencies_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencyData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencyData, + "cuStreamUpdateCaptureDependencies_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuStreamUpdateCaptureDependencies_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamUpdateCaptureDependencies_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamAttachMemAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + dptr: cuda_types::CUdeviceptr, + length: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamAttachMemAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuStreamAttachMemAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(length), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + 
&length, + "cuStreamAttachMemAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamAttachMemAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamQuery_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamQuery_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamSynchronize_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamSynchronize_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamDestroy_v2( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamDestroy_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamCopyAttributes_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dst: cuda_types::CUstream, + src: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dst, + "cuStreamCopyAttributes_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(src), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &src, + "cuStreamCopyAttributes_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEventCreate( + writer: &mut (impl std::io::Write + ?Sized), + phEvent: *mut cuda_types::CUevent, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phEvent, "cuEventCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuEventCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventRecord_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuEventRecord_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuEventRecord_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventRecordWithFlags_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 
0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hEvent, + "cuEventRecordWithFlags_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuEventRecordWithFlags_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuEventRecordWithFlags_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEventQuery( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuEventQuery", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventSynchronize( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuEventSynchronize", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventDestroy_v2( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuEventDestroy_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventElapsedTime( + writer: &mut (impl std::io::Write + ?Sized), + pMilliseconds: *mut f32, + hStart: cuda_types::CUevent, + hEnd: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pMilliseconds), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pMilliseconds, + "cuEventElapsedTime", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStart), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStart, "cuEventElapsedTime", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hEnd), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEnd, "cuEventElapsedTime", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuImportExternalMemory( + writer: &mut (impl std::io::Write + ?Sized), + extMem_out: *mut cuda_types::CUexternalMemory, + memHandleDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_HANDLE_DESC, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extMem_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extMem_out, + "cuImportExternalMemory", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memHandleDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memHandleDesc, + "cuImportExternalMemory", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuExternalMemoryGetMappedBuffer( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: *mut cuda_types::CUdeviceptr, + extMem: cuda_types::CUexternalMemory, + bufferDesc: *const 
cuda_types::CUDA_EXTERNAL_MEMORY_BUFFER_DESC, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &devPtr, + "cuExternalMemoryGetMappedBuffer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(extMem), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extMem, + "cuExternalMemoryGetMappedBuffer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bufferDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bufferDesc, + "cuExternalMemoryGetMappedBuffer", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuExternalMemoryGetMappedMipmappedArray( + writer: &mut (impl std::io::Write + ?Sized), + mipmap: *mut cuda_types::CUmipmappedArray, + extMem: cuda_types::CUexternalMemory, + mipmapDesc: *const cuda_types::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mipmap), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mipmap, + "cuExternalMemoryGetMappedMipmappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(extMem), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extMem, + "cuExternalMemoryGetMappedMipmappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mipmapDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mipmapDesc, + "cuExternalMemoryGetMappedMipmappedArray", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDestroyExternalMemory( + writer: &mut (impl std::io::Write + ?Sized), + extMem: cuda_types::CUexternalMemory, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extMem), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extMem, + "cuDestroyExternalMemory", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuImportExternalSemaphore( + writer: &mut (impl std::io::Write + ?Sized), + extSem_out: *mut cuda_types::CUexternalSemaphore, + semHandleDesc: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extSem_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extSem_out, + "cuImportExternalSemaphore", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(semHandleDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &semHandleDesc, + "cuImportExternalSemaphore", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuSignalExternalSemaphoresAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extSemArray, + "cuSignalExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + 
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + ¶msArray, + "cuSignalExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numExtSems, + "cuSignalExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuSignalExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuWaitExternalSemaphoresAsync_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extSemArray, + "cuWaitExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + ¶msArray, + "cuWaitExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numExtSems, + "cuWaitExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuWaitExternalSemaphoresAsync_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDestroyExternalSemaphore( + writer: &mut (impl std::io::Write + ?Sized), + extSem: cuda_types::CUexternalSemaphore, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extSem), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extSem, + "cuDestroyExternalSemaphore", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue32_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWaitValue32_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWaitValue32_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWaitValue32_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWaitValue32_v2_ptsz", + arg_idx, + writer, + )?; + 
writer.write_all(b")") +} +pub fn write_cuStreamWaitValue64_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWaitValue64_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWaitValue64_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWaitValue64_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWaitValue64_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue32_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWriteValue32_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWriteValue32_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWriteValue32_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWriteValue32_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue64_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWriteValue64_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWriteValue64_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWriteValue64_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWriteValue64_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBatchMemOp_v2_ptsz( + writer: &mut 
(impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamBatchMemOp_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuStreamBatchMemOp_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &paramArray, + "cuStreamBatchMemOp_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamBatchMemOp_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuFuncGetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + pi: *mut ::core::ffi::c_int, + attrib: cuda_types::CUfunction_attribute, + hfunc: cuda_types::CUfunction, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pi), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pi, "cuFuncGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attrib, "cuFuncGetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncGetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncSetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + attrib: cuda_types::CUfunction_attribute, + value: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&attrib, "cuFuncSetAttribute", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuFuncSetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncSetCacheConfig( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + config: cuda_types::CUfunc_cache, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncSetCacheConfig", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&config, "cuFuncSetCacheConfig", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncGetModule( + writer: &mut (impl std::io::Write + ?Sized), + hmod: *mut cuda_types::CUmodule, + hfunc:
cuda_types::CUfunction, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuFuncGetModule", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncGetModule", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncGetName( + writer: &mut (impl std::io::Write + ?Sized), + name: *mut *const ::core::ffi::c_char, + hfunc: cuda_types::CUfunction, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuFuncGetName", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncGetName", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncGetParamInfo( + writer: &mut (impl std::io::Write + ?Sized), + func: cuda_types::CUfunction, + paramIndex: usize, + paramOffset: *mut usize, + paramSize: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&func, "cuFuncGetParamInfo", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramIndex), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &paramIndex, + "cuFuncGetParamInfo", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &paramOffset, + "cuFuncGetParamInfo", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &paramSize, + "cuFuncGetParamInfo", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUfunctionLoadingState_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_UNLOADED => { + writer + .write_all(stringify!(CU_FUNCTION_LOADING_STATE_UNLOADED).as_bytes()) + } + &cuda_types::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_LOADED => { + writer.write_all(stringify!(CU_FUNCTION_LOADING_STATE_LOADED).as_bytes()) + } + &cuda_types::CUfunctionLoadingState_enum::CU_FUNCTION_LOADING_STATE_MAX => { + writer.write_all(stringify!(CU_FUNCTION_LOADING_STATE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +pub fn write_cuFuncIsLoaded( + writer: &mut (impl std::io::Write + ?Sized), + state: *mut cuda_types::CUfunctionLoadingState, + function: cuda_types::CUfunction, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuFuncIsLoaded", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(function), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&function, "cuFuncIsLoaded", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn
write_cuFuncLoad( + writer: &mut (impl std::io::Write + ?Sized), + function: cuda_types::CUfunction, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(function), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&function, "cuFuncLoad", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchKernel_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchKernel_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimX, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimY, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimZ, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimX, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimY, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimZ, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &sharedMemBytes, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuLaunchKernel_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernelParams, + "cuLaunchKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&extra, "cuLaunchKernel_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchKernelEx_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + config: *const cuda_types::CUlaunchConfig, + f: cuda_types::CUfunction, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &config, + "cuLaunchKernelEx_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchKernelEx_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernelParams, + "cuLaunchKernelEx_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&extra, "cuLaunchKernelEx_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchCooperativeKernel_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &f, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimX, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimY, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimZ, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimX, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimY, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimZ, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &sharedMemBytes, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernelParams, + 
"cuLaunchCooperativeKernel_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLaunchCooperativeKernelMultiDevice( + writer: &mut (impl std::io::Write + ?Sized), + launchParamsList: *mut cuda_types::CUDA_LAUNCH_PARAMS, + numDevices: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(launchParamsList), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &launchParamsList, + "cuLaunchCooperativeKernelMultiDevice", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDevices), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDevices, + "cuLaunchCooperativeKernelMultiDevice", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuLaunchCooperativeKernelMultiDevice", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLaunchHostFunc_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + fn_: cuda_types::CUhostFn, + userData: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuLaunchHostFunc_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fn_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&fn_, "cuLaunchHostFunc_ptsz", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &userData, + "cuLaunchHostFunc_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuFuncSetBlockShape( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + x: ::core::ffi::c_int, + y: ::core::ffi::c_int, + z: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncSetBlockShape", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(x), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&x, "cuFuncSetBlockShape", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(y), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&y, "cuFuncSetBlockShape", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(z), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&z, "cuFuncSetBlockShape", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncSetSharedSize( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + bytes: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuFuncSetSharedSize", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuFuncSetSharedSize", arg_idx, 
writer)?; + writer.write_all(b")") +} +pub fn write_cuParamSetSize( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + numbytes: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuParamSetSize", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numbytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numbytes, "cuParamSetSize", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuParamSeti( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + offset: ::core::ffi::c_int, + value: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuParamSeti", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&offset, "cuParamSeti", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuParamSeti", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuParamSetf( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + offset: ::core::ffi::c_int, + value: f32, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuParamSetf", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&offset, "cuParamSetf", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuParamSetf", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuParamSetv( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + offset: ::core::ffi::c_int, + ptr: *mut ::core::ffi::c_void, + numbytes: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuParamSetv", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(offset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&offset, "cuParamSetv", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuParamSetv", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numbytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numbytes, "cuParamSetv", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunch( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunch", arg_idx, 
writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchGrid( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, + grid_width: ::core::ffi::c_int, + grid_height: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchGrid", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(grid_width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&grid_width, "cuLaunchGrid", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(grid_height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&grid_height, "cuLaunchGrid", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchGridAsync( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, + grid_width: ::core::ffi::c_int, + grid_height: ::core::ffi::c_int, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchGridAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(grid_width), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &grid_width, + "cuLaunchGridAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(grid_height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &grid_height, + "cuLaunchGridAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuLaunchGridAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuParamSetTexRef( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + texunit: ::core::ffi::c_int, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hfunc, "cuParamSetTexRef", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(texunit), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&texunit, "cuParamSetTexRef", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuParamSetTexRef", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuFuncSetSharedMemConfig( + writer: &mut (impl std::io::Write + ?Sized), + hfunc: cuda_types::CUfunction, + config: cuda_types::CUsharedconfig, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hfunc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hfunc, + "cuFuncSetSharedMemConfig", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &config, + "cuFuncSetSharedMemConfig", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphCreate( + writer: &mut (impl std::io::Write + ?Sized), + phGraph: *mut cuda_types::CUgraph, 
+ flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phGraph, "cuGraphCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuGraphCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphAddKernelNode_v2( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddKernelNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddKernelNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddKernelNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddKernelNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddKernelNode_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphKernelNodeGetParams_v2( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphKernelNodeGetParams_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphKernelNodeGetParams_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphKernelNodeSetParams_v2( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphKernelNodeSetParams_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphKernelNodeSetParams_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddMemcpyNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const 
cuda_types::CUgraphNode, + numDependencies: usize, + copyParams: *const cuda_types::CUDA_MEMCPY3D, + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddMemcpyNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphAddMemcpyNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddMemcpyNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddMemcpyNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(copyParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &copyParams, + "cuGraphAddMemcpyNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuGraphAddMemcpyNode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphMemcpyNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_MEMCPY3D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphMemcpyNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphMemcpyNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphMemcpyNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_MEMCPY3D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphMemcpyNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphMemcpyNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddMemsetNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS, + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddMemsetNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&hGraph, "cuGraphAddMemsetNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddMemsetNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddMemsetNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memsetParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memsetParams, + "cuGraphAddMemsetNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuGraphAddMemsetNode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphMemsetNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_MEMSET_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphMemsetNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphMemsetNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphMemsetNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphMemsetNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphMemsetNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddHostNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddHostNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphAddHostNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddHostNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddHostNode", + arg_idx, + writer, + 
)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddHostNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphHostNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_HOST_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphHostNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphHostNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphHostNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphHostNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphHostNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddChildGraphNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + childGraph: cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddChildGraphNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddChildGraphNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddChildGraphNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddChildGraphNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(childGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &childGraph, + "cuGraphAddChildGraphNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphChildGraphNodeGetGraph( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + phGraph: *mut cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphChildGraphNodeGetGraph", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(phGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraph, + "cuGraphChildGraphNodeGetGraph", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddEmptyNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddEmptyNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphAddEmptyNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddEmptyNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddEmptyNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddEventRecordNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddEventRecordNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddEventRecordNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddEventRecordNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddEventRecordNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event, + "cuGraphAddEventRecordNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphEventRecordNodeGetEvent( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + event_out: *mut cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphEventRecordNodeGetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event_out, + "cuGraphEventRecordNodeGetEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn 
write_cuGraphEventRecordNodeSetEvent( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphEventRecordNodeSetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event, + "cuGraphEventRecordNodeSetEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddEventWaitNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddEventWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddEventWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddEventWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddEventWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event, + "cuGraphAddEventWaitNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphEventWaitNodeGetEvent( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + event_out: *mut cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphEventWaitNodeGetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event_out, + "cuGraphEventWaitNodeGetEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphEventWaitNodeSetEvent( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphEventWaitNodeSetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event, + "cuGraphEventWaitNodeSetEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddExternalSemaphoresSignalNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: 
*mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddExternalSemaphoresSignalNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddExternalSemaphoresSignalNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddExternalSemaphoresSignalNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddExternalSemaphoresSignalNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddExternalSemaphoresSignalNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExternalSemaphoresSignalNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + params_out: *mut cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExternalSemaphoresSignalNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(params_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &params_out, + "cuGraphExternalSemaphoresSignalNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExternalSemaphoresSignalNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExternalSemaphoresSignalNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExternalSemaphoresSignalNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddExternalSemaphoresWaitNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddExternalSemaphoresWaitNode", + arg_idx, + writer, + )?; 
+ arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddExternalSemaphoresWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddExternalSemaphoresWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddExternalSemaphoresWaitNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddExternalSemaphoresWaitNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExternalSemaphoresWaitNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + params_out: *mut cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExternalSemaphoresWaitNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(params_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &params_out, + "cuGraphExternalSemaphoresWaitNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExternalSemaphoresWaitNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExternalSemaphoresWaitNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExternalSemaphoresWaitNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddBatchMemOpNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddBatchMemOpNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddBatchMemOpNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddBatchMemOpNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddBatchMemOpNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddBatchMemOpNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphBatchMemOpNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams_out: *mut cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphBatchMemOpNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams_out, + "cuGraphBatchMemOpNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphBatchMemOpNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphBatchMemOpNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphBatchMemOpNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecBatchMemOpNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_BATCH_MEM_OP_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecBatchMemOpNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecBatchMemOpNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecBatchMemOpNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddMemAllocNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddMemAllocNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddMemAllocNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddMemAllocNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddMemAllocNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddMemAllocNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphMemAllocNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + params_out: *mut cuda_types::CUDA_MEM_ALLOC_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphMemAllocNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(params_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &params_out, + "cuGraphMemAllocNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddMemFreeNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + dptr: cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddMemFreeNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddMemFreeNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddMemFreeNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddMemFreeNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuGraphAddMemFreeNode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphMemFreeNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + dptr_out: *mut cuda_types::CUdeviceptr, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphMemFreeNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr_out, + "cuGraphMemFreeNodeGetParams", + arg_idx, + 
writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGraphMemTrim( + writer: &mut (impl std::io::Write + ?Sized), + device: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuDeviceGraphMemTrim", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetGraphMemAttribute( + writer: &mut (impl std::io::Write + ?Sized), + device: cuda_types::CUdevice, + attr: cuda_types::CUgraphMem_attribute, + value: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuDeviceGetGraphMemAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attr, + "cuDeviceGetGraphMemAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuDeviceGetGraphMemAttribute", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceSetGraphMemAttribute( + writer: &mut (impl std::io::Write + ?Sized), + device: cuda_types::CUdevice, + attr: cuda_types::CUgraphMem_attribute, + value: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuDeviceSetGraphMemAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attr, + "cuDeviceSetGraphMemAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuDeviceSetGraphMemAttribute", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphClone( + writer: &mut (impl std::io::Write + ?Sized), + phGraphClone: *mut cuda_types::CUgraph, + originalGraph: cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphClone), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phGraphClone, "cuGraphClone", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(originalGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&originalGraph, "cuGraphClone", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeFindInClone( + writer: &mut (impl std::io::Write + ?Sized), + phNode: *mut cuda_types::CUgraphNode, + hOriginalNode: cuda_types::CUgraphNode, + hClonedGraph: cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phNode, + "cuGraphNodeFindInClone", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hOriginalNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hOriginalNode, + "cuGraphNodeFindInClone", + 
arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hClonedGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hClonedGraph, + "cuGraphNodeFindInClone", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeGetType( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + type_: *mut cuda_types::CUgraphNodeType, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hNode, "cuGraphNodeGetType", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuGraphNodeGetType", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphGetNodes( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + nodes: *mut cuda_types::CUgraphNode, + numNodes: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphGetNodes", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&nodes, "cuGraphGetNodes", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numNodes, "cuGraphGetNodes", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphGetRootNodes( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + rootNodes: *mut cuda_types::CUgraphNode, + numRootNodes: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphGetRootNodes", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(rootNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &rootNodes, + "cuGraphGetRootNodes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numRootNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numRootNodes, + "cuGraphGetRootNodes", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphGetEdges( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + from: *mut cuda_types::CUgraphNode, + to: *mut cuda_types::CUgraphNode, + numEdges: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphGetEdges", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(from), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&from, "cuGraphGetEdges", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(to), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&to, "cuGraphGetEdges", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numEdges), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&numEdges, "cuGraphGetEdges", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphGetEdges_v2( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + from: *mut cuda_types::CUgraphNode, + to: *mut cuda_types::CUgraphNode, + edgeData: *mut cuda_types::CUgraphEdgeData, + numEdges: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphGetEdges_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(from), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&from, "cuGraphGetEdges_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(to), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&to, "cuGraphGetEdges_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&edgeData, "cuGraphGetEdges_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numEdges), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numEdges, "cuGraphGetEdges_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeGetDependencies( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + dependencies: *mut cuda_types::CUgraphNode, + numDependencies: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphNodeGetDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphNodeGetDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphNodeGetDependencies", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeGetDependencies_v2( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + dependencies: *mut cuda_types::CUgraphNode, + edgeData: *mut cuda_types::CUgraphEdgeData, + numDependencies: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphNodeGetDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphNodeGetDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &edgeData, + "cuGraphNodeGetDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphNodeGetDependencies_v2", + arg_idx, + 
writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeGetDependentNodes( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + dependentNodes: *mut cuda_types::CUgraphNode, + numDependentNodes: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphNodeGetDependentNodes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependentNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependentNodes, + "cuGraphNodeGetDependentNodes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependentNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependentNodes, + "cuGraphNodeGetDependentNodes", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeGetDependentNodes_v2( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + dependentNodes: *mut cuda_types::CUgraphNode, + edgeData: *mut cuda_types::CUgraphEdgeData, + numDependentNodes: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphNodeGetDependentNodes_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependentNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependentNodes, + "cuGraphNodeGetDependentNodes_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &edgeData, + "cuGraphNodeGetDependentNodes_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependentNodes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependentNodes, + "cuGraphNodeGetDependentNodes_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddDependencies( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + numDependencies: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(from), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&from, "cuGraphAddDependencies", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(to), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&to, "cuGraphAddDependencies", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddDependencies", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddDependencies_v2( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: 
*const cuda_types::CUgraphNode, + edgeData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphAddDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(from), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &from, + "cuGraphAddDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(to), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &to, + "cuGraphAddDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &edgeData, + "cuGraphAddDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddDependencies_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphRemoveDependencies( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + numDependencies: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphRemoveDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(from), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &from, + "cuGraphRemoveDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(to), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &to, + "cuGraphRemoveDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphRemoveDependencies", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphRemoveDependencies_v2( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + from: *const cuda_types::CUgraphNode, + to: *const cuda_types::CUgraphNode, + edgeData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphRemoveDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(from), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &from, + "cuGraphRemoveDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(to), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &to, + "cuGraphRemoveDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + 
&edgeData, + "cuGraphRemoveDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphRemoveDependencies_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphDestroyNode( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hNode, "cuGraphDestroyNode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphInstantiateWithFlags( + writer: &mut (impl std::io::Write + ?Sized), + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + flags: ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphExec, + "cuGraphInstantiateWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphInstantiateWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphInstantiateWithFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphInstantiateWithParams_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphExec, + "cuGraphInstantiateWithParams_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphInstantiateWithParams_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(instantiateParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &instantiateParams, + "cuGraphInstantiateWithParams_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecGetFlags( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + flags: *mut cuda_types::cuuint64_t, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecGetFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuGraphExecGetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphExecKernelNodeSetParams_v2( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS, +) -> 
std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecKernelNodeSetParams_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecKernelNodeSetParams_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecKernelNodeSetParams_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecMemcpyNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + copyParams: *const cuda_types::CUDA_MEMCPY3D, + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecMemcpyNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecMemcpyNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(copyParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &copyParams, + "cuGraphExecMemcpyNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ctx, + "cuGraphExecMemcpyNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecMemsetNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + memsetParams: *const cuda_types::CUDA_MEMSET_NODE_PARAMS, + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecMemsetNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecMemsetNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(memsetParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &memsetParams, + "cuGraphExecMemsetNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ctx, + "cuGraphExecMemsetNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecHostNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_HOST_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecHostNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecHostNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecHostNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecChildGraphNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + childGraph: cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecChildGraphNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecChildGraphNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(childGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &childGraph, + "cuGraphExecChildGraphNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecEventRecordNodeSetEvent( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecEventRecordNodeSetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecEventRecordNodeSetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event, + "cuGraphExecEventRecordNodeSetEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecEventWaitNodeSetEvent( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + event: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecEventWaitNodeSetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecEventWaitNodeSetEvent", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(event), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &event, + "cuGraphExecEventWaitNodeSetEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecExternalSemaphoresSignalNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + 
hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_SIGNAL_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecExternalSemaphoresSignalNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecExternalSemaphoresSignalNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecExternalSemaphoresSignalNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecExternalSemaphoresWaitNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_EXT_SEM_WAIT_NODE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecExternalSemaphoresWaitNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecExternalSemaphoresWaitNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecExternalSemaphoresWaitNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeSetEnabled( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + isEnabled: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphNodeSetEnabled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hNode, "cuGraphNodeSetEnabled", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(isEnabled), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &isEnabled, + "cuGraphNodeSetEnabled", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeGetEnabled( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + isEnabled: *mut ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphNodeGetEnabled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hNode, "cuGraphNodeGetEnabled", arg_idx, writer)?; + arg_idx += 1; + 
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(isEnabled), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &isEnabled, + "cuGraphNodeGetEnabled", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphUpload_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphUpload_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuGraphUpload_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphLaunch_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphLaunch_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuGraphLaunch_ptsz", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphExecDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecDestroy", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphExecUpdate_v2( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + resultInfo: *mut cuda_types::CUgraphExecUpdateResultInfo, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecUpdate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphExecUpdate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resultInfo), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resultInfo, + "cuGraphExecUpdate_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphKernelNodeCopyAttributes( + writer: &mut (impl std::io::Write + ?Sized), + dst: cuda_types::CUgraphNode, + src: cuda_types::CUgraphNode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dst, + 
"cuGraphKernelNodeCopyAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(src), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &src, + "cuGraphKernelNodeCopyAttributes", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphDebugDotPrint( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraph, + path: *const ::core::ffi::c_char, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphDebugDotPrint", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(path), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&path, "cuGraphDebugDotPrint", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuGraphDebugDotPrint", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuUserObjectCreate( + writer: &mut (impl std::io::Write + ?Sized), + object_out: *mut cuda_types::CUuserObject, + ptr: *mut ::core::ffi::c_void, + destroy: cuda_types::CUhostFn, + initialRefcount: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(object_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &object_out, + "cuUserObjectCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ptr, "cuUserObjectCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(destroy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&destroy, "cuUserObjectCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(initialRefcount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &initialRefcount, + "cuUserObjectCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuUserObjectCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuUserObjectRetain( + writer: &mut (impl std::io::Write + ?Sized), + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(object), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&object, "cuUserObjectRetain", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuUserObjectRetain", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuUserObjectRelease( + writer: &mut (impl std::io::Write + ?Sized), + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(object), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&object, "cuUserObjectRelease", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuUserObjectRelease", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphRetainUserObject( + writer: &mut (impl std::io::Write + ?Sized), + graph: cuda_types::CUgraph, + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(graph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &graph, + "cuGraphRetainUserObject", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(object), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &object, + "cuGraphRetainUserObject", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuGraphRetainUserObject", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphRetainUserObject", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphReleaseUserObject( + writer: &mut (impl std::io::Write + ?Sized), + graph: cuda_types::CUgraph, + object: cuda_types::CUuserObject, + count: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(graph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &graph, + "cuGraphReleaseUserObject", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(object), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &object, + "cuGraphReleaseUserObject", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuGraphReleaseUserObject", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *mut cuda_types::CUgraphNodeParams, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phGraphNode, "cuGraphAddNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphAddNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dependencies, "cuGraphAddNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&nodeParams, "cuGraphAddNode", arg_idx, writer)?; + 
writer.write_all(b")") +} +pub fn write_cuGraphAddNode_v2( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + nodeParams: *mut cuda_types::CUgraphNodeParams, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphAddNode_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencyData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencyData, + "cuGraphAddNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddNode_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddNode_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUgraphNodeParams, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hNode, "cuGraphNodeSetParams", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUgraphNodeParams, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphConditionalHandleCreate( + writer: &mut (impl std::io::Write + ?Sized), + pHandle_out: *mut cuda_types::CUgraphConditionalHandle, + hGraph: cuda_types::CUgraph, + ctx: cuda_types::CUcontext, + 
defaultLaunchValue: ::core::ffi::c_uint, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pHandle_out, + "cuGraphConditionalHandleCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphConditionalHandleCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ctx, + "cuGraphConditionalHandleCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(defaultLaunchValue), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &defaultLaunchValue, + "cuGraphConditionalHandleCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphConditionalHandleCreate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyMaxActiveBlocksPerMultiprocessor( + writer: &mut (impl std::io::Write + ?Sized), + numBlocks: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSize: ::core::ffi::c_int, + dynamicSMemSize: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(numBlocks), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numBlocks, + "cuOccupancyMaxActiveBlocksPerMultiprocessor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyMaxActiveBlocksPerMultiprocessor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSize, + "cuOccupancyMaxActiveBlocksPerMultiprocessor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dynamicSMemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dynamicSMemSize, + "cuOccupancyMaxActiveBlocksPerMultiprocessor", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + writer: &mut (impl std::io::Write + ?Sized), + numBlocks: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSize: ::core::ffi::c_int, + dynamicSMemSize: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(numBlocks), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numBlocks, + "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSize, + 
"cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dynamicSMemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dynamicSMemSize, + "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyMaxPotentialBlockSize( + writer: &mut (impl std::io::Write + ?Sized), + minGridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize, + dynamicSMemSize: usize, + blockSizeLimit: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(minGridSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &minGridSize, + "cuOccupancyMaxPotentialBlockSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSize, + "cuOccupancyMaxPotentialBlockSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyMaxPotentialBlockSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSizeToDynamicSMemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSizeToDynamicSMemSize, + "cuOccupancyMaxPotentialBlockSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dynamicSMemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dynamicSMemSize, + "cuOccupancyMaxPotentialBlockSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSizeLimit), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSizeLimit, + "cuOccupancyMaxPotentialBlockSize", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyMaxPotentialBlockSizeWithFlags( + writer: &mut (impl std::io::Write + ?Sized), + minGridSize: *mut ::core::ffi::c_int, + blockSize: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + blockSizeToDynamicSMemSize: cuda_types::CUoccupancyB2DSize, + dynamicSMemSize: usize, + blockSizeLimit: ::core::ffi::c_int, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(minGridSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &minGridSize, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSize, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + 
writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSizeToDynamicSMemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSizeToDynamicSMemSize, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dynamicSMemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dynamicSMemSize, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSizeLimit), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSizeLimit, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuOccupancyMaxPotentialBlockSizeWithFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyAvailableDynamicSMemPerBlock( + writer: &mut (impl std::io::Write + ?Sized), + dynamicSmemSize: *mut usize, + func: cuda_types::CUfunction, + numBlocks: ::core::ffi::c_int, + blockSize: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dynamicSmemSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dynamicSmemSize, + "cuOccupancyAvailableDynamicSMemPerBlock", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyAvailableDynamicSMemPerBlock", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numBlocks), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numBlocks, + "cuOccupancyAvailableDynamicSMemPerBlock", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockSize, + "cuOccupancyAvailableDynamicSMemPerBlock", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyMaxPotentialClusterSize( + writer: &mut (impl std::io::Write + ?Sized), + clusterSize: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + config: *const cuda_types::CUlaunchConfig, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(clusterSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &clusterSize, + "cuOccupancyMaxPotentialClusterSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyMaxPotentialClusterSize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &config, + "cuOccupancyMaxPotentialClusterSize", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuOccupancyMaxActiveClusters( + writer: &mut (impl std::io::Write + ?Sized), + numClusters: *mut ::core::ffi::c_int, + func: cuda_types::CUfunction, + config: *const cuda_types::CUlaunchConfig, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(numClusters), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numClusters, + "cuOccupancyMaxActiveClusters", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(func), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &func, + "cuOccupancyMaxActiveClusters", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &config, + "cuOccupancyMaxActiveClusters", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetArray( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + hArray: cuda_types::CUarray, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefSetArray", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hArray, "cuTexRefSetArray", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuTexRefSetArray", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetMipmappedArray( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + hMipmappedArray: cuda_types::CUmipmappedArray, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetMipmappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hMipmappedArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hMipmappedArray, + "cuTexRefSetMipmappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuTexRefSetMipmappedArray", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetAddress_v2( + writer: &mut (impl std::io::Write + ?Sized), + ByteOffset: *mut usize, + hTexRef: cuda_types::CUtexref, + dptr: cuda_types::CUdeviceptr, + bytes: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ByteOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteOffset, + "cuTexRefSetAddress_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetAddress_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuTexRefSetAddress_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuTexRefSetAddress_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetAddress2D_v3( + writer: &mut 
(impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR, + dptr: cuda_types::CUdeviceptr, + Pitch: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetAddress2D_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(desc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &desc, + "cuTexRefSetAddress2D_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuTexRefSetAddress2D_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Pitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Pitch, + "cuTexRefSetAddress2D_v3", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetFormat( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + fmt: cuda_types::CUarray_format, + NumPackedComponents: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefSetFormat", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fmt), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&fmt, "cuTexRefSetFormat", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(NumPackedComponents), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &NumPackedComponents, + "cuTexRefSetFormat", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetAddressMode( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + dim: ::core::ffi::c_int, + am: cuda_types::CUaddress_mode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetAddressMode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dim), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dim, "cuTexRefSetAddressMode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(am), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&am, "cuTexRefSetAddressMode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetFilterMode( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + fm: cuda_types::CUfilter_mode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetFilterMode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fm), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&fm, "cuTexRefSetFilterMode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetMipmapFilterMode( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: 
cuda_types::CUtexref, + fm: cuda_types::CUfilter_mode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetMipmapFilterMode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fm), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &fm, + "cuTexRefSetMipmapFilterMode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetMipmapLevelBias( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + bias: f32, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetMipmapLevelBias", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bias), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bias, + "cuTexRefSetMipmapLevelBias", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetMipmapLevelClamp( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + minMipmapLevelClamp: f32, + maxMipmapLevelClamp: f32, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetMipmapLevelClamp", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(minMipmapLevelClamp), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &minMipmapLevelClamp, + "cuTexRefSetMipmapLevelClamp", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(maxMipmapLevelClamp), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &maxMipmapLevelClamp, + "cuTexRefSetMipmapLevelClamp", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetMaxAnisotropy( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + maxAniso: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetMaxAnisotropy", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(maxAniso), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &maxAniso, + "cuTexRefSetMaxAnisotropy", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetBorderColor( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + pBorderColor: *mut f32, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetBorderColor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pBorderColor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pBorderColor, + "cuTexRefSetBorderColor", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetFlags( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: 
cuda_types::CUtexref, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefSetFlags", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuTexRefSetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetAddress_v2( + writer: &mut (impl std::io::Write + ?Sized), + pdptr: *mut cuda_types::CUdeviceptr, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pdptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pdptr, "cuTexRefGetAddress_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetAddress_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetArray( + writer: &mut (impl std::io::Write + ?Sized), + phArray: *mut cuda_types::CUarray, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phArray, "cuTexRefGetArray", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefGetArray", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetMipmappedArray( + writer: &mut (impl std::io::Write + ?Sized), + phMipmappedArray: *mut cuda_types::CUmipmappedArray, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phMipmappedArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phMipmappedArray, + "cuTexRefGetMipmappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetMipmappedArray", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetAddressMode( + writer: &mut (impl std::io::Write + ?Sized), + pam: *mut cuda_types::CUaddress_mode, + hTexRef: cuda_types::CUtexref, + dim: ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pam), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pam, "cuTexRefGetAddressMode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetAddressMode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dim), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dim, "cuTexRefGetAddressMode", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetFilterMode( + writer: &mut (impl std::io::Write + ?Sized), + pfm: *mut cuda_types::CUfilter_mode, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(pfm), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pfm, "cuTexRefGetFilterMode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetFilterMode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetFormat( + writer: &mut (impl std::io::Write + ?Sized), + pFormat: *mut cuda_types::CUarray_format, + pNumChannels: *mut ::core::ffi::c_int, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pFormat), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pFormat, "cuTexRefGetFormat", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pNumChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pNumChannels, + "cuTexRefGetFormat", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefGetFormat", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetMipmapFilterMode( + writer: &mut (impl std::io::Write + ?Sized), + pfm: *mut cuda_types::CUfilter_mode, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pfm), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pfm, + "cuTexRefGetMipmapFilterMode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetMipmapFilterMode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetMipmapLevelBias( + writer: &mut (impl std::io::Write + ?Sized), + pbias: *mut f32, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pbias), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pbias, + "cuTexRefGetMipmapLevelBias", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetMipmapLevelBias", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetMipmapLevelClamp( + writer: &mut (impl std::io::Write + ?Sized), + pminMipmapLevelClamp: *mut f32, + pmaxMipmapLevelClamp: *mut f32, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pminMipmapLevelClamp), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pminMipmapLevelClamp, + "cuTexRefGetMipmapLevelClamp", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pmaxMipmapLevelClamp), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pmaxMipmapLevelClamp, + "cuTexRefGetMipmapLevelClamp", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetMipmapLevelClamp", + arg_idx, + writer, + )?; + writer.write_all(b")") +} 
+pub fn write_cuTexRefGetMaxAnisotropy( + writer: &mut (impl std::io::Write + ?Sized), + pmaxAniso: *mut ::core::ffi::c_int, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pmaxAniso), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pmaxAniso, + "cuTexRefGetMaxAnisotropy", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetMaxAnisotropy", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetBorderColor( + writer: &mut (impl std::io::Write + ?Sized), + pBorderColor: *mut f32, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pBorderColor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pBorderColor, + "cuTexRefGetBorderColor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefGetBorderColor", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetFlags( + writer: &mut (impl std::io::Write + ?Sized), + pFlags: *mut ::core::ffi::c_uint, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pFlags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pFlags, "cuTexRefGetFlags", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefGetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefCreate( + writer: &mut (impl std::io::Write + ?Sized), + pTexRef: *mut cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pTexRef, "cuTexRefCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuSurfRefSetArray( + writer: &mut (impl std::io::Write + ?Sized), + hSurfRef: cuda_types::CUsurfref, + hArray: cuda_types::CUarray, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hSurfRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hSurfRef, "cuSurfRefSetArray", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hArray, "cuSurfRefSetArray", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuSurfRefSetArray", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuSurfRefGetArray( + writer: 
&mut (impl std::io::Write + ?Sized), + phArray: *mut cuda_types::CUarray, + hSurfRef: cuda_types::CUsurfref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phArray, "cuSurfRefGetArray", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hSurfRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hSurfRef, "cuSurfRefGetArray", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexObjectCreate( + writer: &mut (impl std::io::Write + ?Sized), + pTexObject: *mut cuda_types::CUtexObject, + pResDesc: *const cuda_types::CUDA_RESOURCE_DESC, + pTexDesc: *const cuda_types::CUDA_TEXTURE_DESC, + pResViewDesc: *const cuda_types::CUDA_RESOURCE_VIEW_DESC, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pTexObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pTexObject, + "cuTexObjectCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pResDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pResDesc, "cuTexObjectCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pTexDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pTexDesc, "cuTexObjectCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pResViewDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pResViewDesc, + "cuTexObjectCreate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexObjectDestroy( + writer: &mut (impl std::io::Write + ?Sized), + texObject: cuda_types::CUtexObject, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(texObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &texObject, + "cuTexObjectDestroy", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexObjectGetResourceDesc( + writer: &mut (impl std::io::Write + ?Sized), + pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC, + texObject: cuda_types::CUtexObject, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pResDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pResDesc, + "cuTexObjectGetResourceDesc", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(texObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &texObject, + "cuTexObjectGetResourceDesc", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexObjectGetTextureDesc( + writer: &mut (impl std::io::Write + ?Sized), + pTexDesc: *mut cuda_types::CUDA_TEXTURE_DESC, + texObject: cuda_types::CUtexObject, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pTexDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pTexDesc, + "cuTexObjectGetTextureDesc", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(texObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &texObject, + "cuTexObjectGetTextureDesc", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn 
write_cuTexObjectGetResourceViewDesc( + writer: &mut (impl std::io::Write + ?Sized), + pResViewDesc: *mut cuda_types::CUDA_RESOURCE_VIEW_DESC, + texObject: cuda_types::CUtexObject, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pResViewDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pResViewDesc, + "cuTexObjectGetResourceViewDesc", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(texObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &texObject, + "cuTexObjectGetResourceViewDesc", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuSurfObjectCreate( + writer: &mut (impl std::io::Write + ?Sized), + pSurfObject: *mut cuda_types::CUsurfObject, + pResDesc: *const cuda_types::CUDA_RESOURCE_DESC, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pSurfObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pSurfObject, + "cuSurfObjectCreate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pResDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pResDesc, "cuSurfObjectCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuSurfObjectDestroy( + writer: &mut (impl std::io::Write + ?Sized), + surfObject: cuda_types::CUsurfObject, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(surfObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &surfObject, + "cuSurfObjectDestroy", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuSurfObjectGetResourceDesc( + writer: &mut (impl std::io::Write + ?Sized), + pResDesc: *mut cuda_types::CUDA_RESOURCE_DESC, + surfObject: cuda_types::CUsurfObject, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pResDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pResDesc, + "cuSurfObjectGetResourceDesc", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(surfObject), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &surfObject, + "cuSurfObjectGetResourceDesc", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTensorMapEncodeTiled( + writer: &mut (impl std::io::Write + ?Sized), + tensorMap: *mut cuda_types::CUtensorMap, + tensorDataType: cuda_types::CUtensorMapDataType, + tensorRank: cuda_types::cuuint32_t, + globalAddress: *mut ::core::ffi::c_void, + globalDim: *const cuda_types::cuuint64_t, + globalStrides: *const cuda_types::cuuint64_t, + boxDim: *const cuda_types::cuuint32_t, + elementStrides: *const cuda_types::cuuint32_t, + interleave: cuda_types::CUtensorMapInterleave, + swizzle: cuda_types::CUtensorMapSwizzle, + l2Promotion: cuda_types::CUtensorMapL2promotion, + oobFill: cuda_types::CUtensorMapFloatOOBfill, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(tensorMap), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorMap, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(tensorDataType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorDataType, + 
"cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(tensorRank), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorRank, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalAddress), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &globalAddress, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalDim), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &globalDim, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalStrides), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &globalStrides, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(boxDim), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &boxDim, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(elementStrides), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &elementStrides, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(interleave), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &interleave, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(swizzle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &swizzle, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(l2Promotion), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &l2Promotion, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(oobFill), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &oobFill, + "cuTensorMapEncodeTiled", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTensorMapEncodeIm2col( + writer: &mut (impl std::io::Write + ?Sized), + tensorMap: *mut cuda_types::CUtensorMap, + tensorDataType: cuda_types::CUtensorMapDataType, + tensorRank: cuda_types::cuuint32_t, + globalAddress: *mut ::core::ffi::c_void, + globalDim: *const cuda_types::cuuint64_t, + globalStrides: *const cuda_types::cuuint64_t, + pixelBoxLowerCorner: *const ::core::ffi::c_int, + pixelBoxUpperCorner: *const ::core::ffi::c_int, + channelsPerPixel: cuda_types::cuuint32_t, + pixelsPerColumn: cuda_types::cuuint32_t, + elementStrides: *const cuda_types::cuuint32_t, + interleave: cuda_types::CUtensorMapInterleave, + swizzle: cuda_types::CUtensorMapSwizzle, + l2Promotion: cuda_types::CUtensorMapL2promotion, + oobFill: cuda_types::CUtensorMapFloatOOBfill, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(tensorMap), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorMap, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(tensorDataType), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorDataType, + "cuTensorMapEncodeIm2col", + 
arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(tensorRank), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorRank, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalAddress), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &globalAddress, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalDim), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &globalDim, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalStrides), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &globalStrides, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pixelBoxLowerCorner), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pixelBoxLowerCorner, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pixelBoxUpperCorner), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pixelBoxUpperCorner, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(channelsPerPixel), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &channelsPerPixel, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pixelsPerColumn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pixelsPerColumn, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(elementStrides), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &elementStrides, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(interleave), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &interleave, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(swizzle), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &swizzle, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(l2Promotion), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &l2Promotion, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(oobFill), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &oobFill, + "cuTensorMapEncodeIm2col", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTensorMapReplaceAddress( + writer: &mut (impl std::io::Write + ?Sized), + tensorMap: *mut cuda_types::CUtensorMap, + globalAddress: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(tensorMap), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &tensorMap, + "cuTensorMapReplaceAddress", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(globalAddress), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &globalAddress, + "cuTensorMapReplaceAddress", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceCanAccessPeer( + writer: &mut (impl std::io::Write + ?Sized), + canAccessPeer: *mut ::core::ffi::c_int, + dev: cuda_types::CUdevice, + peerDev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(canAccessPeer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &canAccessPeer, + "cuDeviceCanAccessPeer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceCanAccessPeer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(peerDev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &peerDev, + "cuDeviceCanAccessPeer", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCtxEnablePeerAccess( + writer: &mut (impl std::io::Write + ?Sized), + peerContext: cuda_types::CUcontext, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(peerContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &peerContext, + "cuCtxEnablePeerAccess", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuCtxEnablePeerAccess", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxDisablePeerAccess( + writer: &mut (impl std::io::Write + ?Sized), + peerContext: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(peerContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &peerContext, + "cuCtxDisablePeerAccess", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetP2PAttribute( + writer: &mut (impl std::io::Write + ?Sized), + value: *mut ::core::ffi::c_int, + attrib: cuda_types::CUdevice_P2PAttribute, + srcDevice: cuda_types::CUdevice, + dstDevice: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuDeviceGetP2PAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attrib, + "cuDeviceGetP2PAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuDeviceGetP2PAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuDeviceGetP2PAttribute", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsUnregisterResource( + writer: &mut (impl std::io::Write + ?Sized), + resource: cuda_types::CUgraphicsResource, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(resource), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsUnregisterResource", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsSubResourceGetMappedArray( + writer: &mut (impl std::io::Write + ?Sized), + pArray: *mut cuda_types::CUarray, + resource: cuda_types::CUgraphicsResource, + arrayIndex: ::core::ffi::c_uint, + mipLevel: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pArray, + "cuGraphicsSubResourceGetMappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsSubResourceGetMappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(arrayIndex), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &arrayIndex, + "cuGraphicsSubResourceGetMappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mipLevel), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mipLevel, + "cuGraphicsSubResourceGetMappedArray", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsResourceGetMappedMipmappedArray( + writer: &mut (impl std::io::Write + ?Sized), + pMipmappedArray: *mut cuda_types::CUmipmappedArray, + resource: cuda_types::CUgraphicsResource, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pMipmappedArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pMipmappedArray, + "cuGraphicsResourceGetMappedMipmappedArray", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsResourceGetMappedMipmappedArray", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsResourceGetMappedPointer_v2( + writer: &mut (impl std::io::Write + ?Sized), + pDevPtr: *mut cuda_types::CUdeviceptr, + pSize: *mut usize, + resource: cuda_types::CUgraphicsResource, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pDevPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pDevPtr, + "cuGraphicsResourceGetMappedPointer_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pSize, + "cuGraphicsResourceGetMappedPointer_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsResourceGetMappedPointer_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsResourceSetMapFlags_v2( + writer: &mut (impl std::io::Write + ?Sized), + resource: cuda_types::CUgraphicsResource, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsResourceSetMapFlags_v2", + arg_idx, + writer, + )?; + 
arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphicsResourceSetMapFlags_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsMapResources_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuGraphicsMapResources_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resources), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resources, + "cuGraphicsMapResources_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGraphicsMapResources_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsUnmapResources_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuGraphicsUnmapResources_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resources), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resources, + "cuGraphicsUnmapResources_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGraphicsUnmapResources_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGetProcAddress_v2( + writer: &mut (impl std::io::Write + ?Sized), + symbol: *const ::core::ffi::c_char, + pfn: *mut *mut ::core::ffi::c_void, + cudaVersion: ::core::ffi::c_int, + flags: cuda_types::cuuint64_t, + symbolStatus: *mut cuda_types::CUdriverProcAddressQueryResult, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(symbol), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&symbol, "cuGetProcAddress_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pfn), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pfn, "cuGetProcAddress_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(cudaVersion), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &cudaVersion, + "cuGetProcAddress_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuGetProcAddress_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(symbolStatus), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &symbolStatus, + "cuGetProcAddress_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for 
cuda_types::CUcoredumpSettings_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_ENABLE_ON_EXCEPTION => { + writer.write_all(stringify!(CU_COREDUMP_ENABLE_ON_EXCEPTION).as_bytes()) + } + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_TRIGGER_HOST => { + writer.write_all(stringify!(CU_COREDUMP_TRIGGER_HOST).as_bytes()) + } + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_LIGHTWEIGHT => { + writer.write_all(stringify!(CU_COREDUMP_LIGHTWEIGHT).as_bytes()) + } + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_ENABLE_USER_TRIGGER => { + writer.write_all(stringify!(CU_COREDUMP_ENABLE_USER_TRIGGER).as_bytes()) + } + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_FILE => { + writer.write_all(stringify!(CU_COREDUMP_FILE).as_bytes()) + } + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_PIPE => { + writer.write_all(stringify!(CU_COREDUMP_PIPE).as_bytes()) + } + &cuda_types::CUcoredumpSettings_enum::CU_COREDUMP_MAX => { + writer.write_all(stringify!(CU_COREDUMP_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +pub fn write_cuCoredumpGetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attrib, + "cuCoredumpGetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuCoredumpGetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuCoredumpGetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCoredumpGetAttributeGlobal( + writer: &mut (impl std::io::Write + ?Sized), + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attrib, + "cuCoredumpGetAttributeGlobal", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuCoredumpGetAttributeGlobal", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuCoredumpGetAttributeGlobal", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCoredumpSetAttribute( + writer: &mut (impl std::io::Write + ?Sized), + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attrib, + "cuCoredumpSetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &value, + "cuCoredumpSetAttribute", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuCoredumpSetAttribute", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCoredumpSetAttributeGlobal( + writer: &mut (impl std::io::Write + ?Sized), + attrib: cuda_types::CUcoredumpSettings, + value: *mut ::core::ffi::c_void, + size: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(attrib), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &attrib, + "cuCoredumpSetAttributeGlobal", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuCoredumpSetAttributeGlobal", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuCoredumpSetAttributeGlobal", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGetExportTable( + writer: &mut (impl std::io::Write + ?Sized), + ppExportTable: *mut *const ::core::ffi::c_void, + pExportTableId: *const cuda_types::CUuuid, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ppExportTable), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ppExportTable, + "cuGetExportTable", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pExportTableId), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pExportTableId, + "cuGetExportTable", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUgreenCtx { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUdevResourceDesc { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +impl crate::format::CudaDisplay for cuda_types::CUgreenCtxCreate_flags { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUgreenCtxCreate_flags::CU_GREEN_CTX_DEFAULT_STREAM => { + writer.write_all(stringify!(CU_GREEN_CTX_DEFAULT_STREAM).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUdevResourceType { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUdevResourceType::CU_DEV_RESOURCE_TYPE_INVALID => { + writer.write_all(stringify!(CU_DEV_RESOURCE_TYPE_INVALID).as_bytes()) + } + &cuda_types::CUdevResourceType::CU_DEV_RESOURCE_TYPE_SM => { + writer.write_all(stringify!(CU_DEV_RESOURCE_TYPE_SM).as_bytes()) + } + &cuda_types::CUdevResourceType::CU_DEV_RESOURCE_TYPE_MAX => { + writer.write_all(stringify!(CU_DEV_RESOURCE_TYPE_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay 
for cuda_types::CUdevSmResource_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(smCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.smCount, "", 0, writer)?; + writer.write_all(b" }") + } +} +pub fn write_cuGreenCtxCreate( + writer: &mut (impl std::io::Write + ?Sized), + phCtx: *mut cuda_types::CUgreenCtx, + desc: cuda_types::CUdevResourceDesc, + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phCtx, "cuGreenCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(desc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&desc, "cuGreenCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuGreenCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuGreenCtxCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGreenCtxDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hCtx: cuda_types::CUgreenCtx, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hCtx, "cuGreenCtxDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxFromGreenCtx( + writer: &mut (impl std::io::Write + ?Sized), + pContext: *mut cuda_types::CUcontext, + hCtx: cuda_types::CUgreenCtx, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pContext, "cuCtxFromGreenCtx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hCtx, "cuCtxFromGreenCtx", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDeviceGetDevResource( + writer: &mut (impl std::io::Write + ?Sized), + device: cuda_types::CUdevice, + resource: *mut cuda_types::CUdevResource, + type_: cuda_types::CUdevResourceType, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &device, + "cuDeviceGetDevResource", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuDeviceGetDevResource", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &type_, + "cuDeviceGetDevResource", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCtxGetDevResource( + writer: &mut (impl std::io::Write + ?Sized), + hCtx: cuda_types::CUcontext, + resource: *mut cuda_types::CUdevResource, + type_: cuda_types::CUdevResourceType, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hCtx, "cuCtxGetDevResource", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuCtxGetDevResource", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuCtxGetDevResource", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGreenCtxGetDevResource( + writer: &mut (impl std::io::Write + ?Sized), + hCtx: cuda_types::CUgreenCtx, + resource: *mut cuda_types::CUdevResource, + type_: cuda_types::CUdevResourceType, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hCtx, + "cuGreenCtxGetDevResource", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGreenCtxGetDevResource", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &type_, + "cuGreenCtxGetDevResource", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevSmResourceSplitByCount( + writer: &mut (impl std::io::Write + ?Sized), + result: *mut cuda_types::CUdevResource, + nbGroups: *mut ::core::ffi::c_uint, + input: *const cuda_types::CUdevResource, + remaining: *mut cuda_types::CUdevResource, + useFlags: ::core::ffi::c_uint, + minCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(result), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &result, + "cuDevSmResourceSplitByCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nbGroups), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nbGroups, + "cuDevSmResourceSplitByCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(input), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &input, + "cuDevSmResourceSplitByCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(remaining), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &remaining, + "cuDevSmResourceSplitByCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(useFlags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &useFlags, + "cuDevSmResourceSplitByCount", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(minCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &minCount, + "cuDevSmResourceSplitByCount", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevResourceGenerateDesc( + writer: &mut (impl std::io::Write + ?Sized), + phDesc: *mut cuda_types::CUdevResourceDesc, + resources: *mut cuda_types::CUdevResource, + nbResources: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(phDesc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phDesc, + "cuDevResourceGenerateDesc", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resources), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resources, + "cuDevResourceGenerateDesc", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nbResources), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nbResources, + "cuDevResourceGenerateDesc", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGreenCtxRecordEvent( + writer: &mut (impl std::io::Write + ?Sized), + hCtx: cuda_types::CUgreenCtx, + hEvent: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hCtx, "cuGreenCtxRecordEvent", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hEvent, + "cuGreenCtxRecordEvent", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGreenCtxWaitEvent( + writer: &mut (impl std::io::Write + ?Sized), + hCtx: cuda_types::CUgreenCtx, + hEvent: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hCtx, "cuGreenCtxWaitEvent", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuGreenCtxWaitEvent", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamGetGreenCtx( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + phCtx: *mut cuda_types::CUgreenCtx, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamGetGreenCtx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(phCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phCtx, "cuStreamGetGreenCtx", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemHostRegister( + writer: &mut (impl std::io::Write + ?Sized), + p: *mut ::core::ffi::c_void, + bytesize: usize, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&p, "cuMemHostRegister", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemHostRegister", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuMemHostRegister", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphicsResourceSetMapFlags( + writer: &mut (impl std::io::Write + ?Sized), + resource: cuda_types::CUgraphicsResource, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsResourceSetMapFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphicsResourceSetMapFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLinkCreate( + writer: &mut (impl std::io::Write + ?Sized), + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, + stateOut: *mut cuda_types::CUlinkState, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numOptions, "cuLinkCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuLinkCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&optionValues, "cuLinkCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stateOut), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stateOut, "cuLinkCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLinkAddData( + writer: &mut (impl std::io::Write + ?Sized), + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + data: *mut ::core::ffi::c_void, + size: usize, + name: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(data), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&data, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numOptions, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuLinkAddData", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&optionValues, "cuLinkAddData", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLinkAddFile( + writer: &mut (impl std::io::Write 
+ ?Sized), + state: cuda_types::CUlinkState, + type_: cuda_types::CUjitInputType, + path: *const ::core::ffi::c_char, + numOptions: ::core::ffi::c_uint, + options: *mut cuda_types::CUjit_option, + optionValues: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(state), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&state, "cuLinkAddFile", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(type_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&type_, "cuLinkAddFile", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(path), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&path, "cuLinkAddFile", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numOptions), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&numOptions, "cuLinkAddFile", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(options), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&options, "cuLinkAddFile", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(optionValues), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&optionValues, "cuLinkAddFile", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetAddress2D_v2( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR, + dptr: cuda_types::CUdeviceptr, + Pitch: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetAddress2D_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(desc), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &desc, + "cuTexRefSetAddress2D_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuTexRefSetAddress2D_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Pitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Pitch, + "cuTexRefSetAddress2D_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY2D_v1_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(srcXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcDevice, "", 0, writer)?; + 
writer.write_all(concat!(", ", stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.WidthInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_MEMCPY3D_v1_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(srcXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcY, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcLOD), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcLOD, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(srcHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.srcHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstXInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstXInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstY, "", 0, 
writer)?; + writer.write_all(concat!(", ", stringify!(dstZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstZ, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstLOD), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstLOD, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstMemoryType), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstMemoryType, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHost, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstDevice, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstArray, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstPitch, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(dstHeight), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.dstHeight, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.WidthInBytes, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Depth, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY_DESCRIPTOR_v1_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Format), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(NumChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.NumChannels, "", 0, writer)?; + writer.write_all(b" }") + } +} +impl crate::format::CudaDisplay for cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1_st { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + writer.write_all(concat!("{ ", stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Width, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Height, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Depth), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Depth, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Format), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Format, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(NumChannels), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.NumChannels, "", 0, writer)?; + writer.write_all(concat!(", ", stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&self.Flags, "", 0, writer)?; + writer.write_all(b" }") + } +} +pub fn 
write_cuDeviceTotalMem( + writer: &mut (impl std::io::Write + ?Sized), + bytes: *mut ::core::ffi::c_uint, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuDeviceTotalMem", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDeviceTotalMem", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxCreate( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuCtxCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuModuleGetGlobal( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr_v1, + bytes: *mut ::core::ffi::c_uint, + hmod: cuda_types::CUmodule, + name: *const ::core::ffi::c_char, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuModuleGetGlobal", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuModuleGetGlobal", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hmod), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hmod, "cuModuleGetGlobal", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(name), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&name, "cuModuleGetGlobal", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemGetInfo( + writer: &mut (impl std::io::Write + ?Sized), + free: *mut ::core::ffi::c_uint, + total: *mut ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(free), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&free, "cuMemGetInfo", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(total), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&total, "cuMemGetInfo", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAlloc( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr_v1, + bytesize: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAlloc", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, 
"cuMemAlloc", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocPitch( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr_v1, + pPitch: *mut ::core::ffi::c_uint, + WidthInBytes: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, + ElementSizeBytes: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAllocPitch", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pPitch, "cuMemAllocPitch", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(WidthInBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &WidthInBytes, + "cuMemAllocPitch", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemAllocPitch", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ElementSizeBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ElementSizeBytes, + "cuMemAllocPitch", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemFree( + writer: &mut (impl std::io::Write + ?Sized), + dptr: cuda_types::CUdeviceptr_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemFree", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemGetAddressRange( + writer: &mut (impl std::io::Write + ?Sized), + pbase: *mut cuda_types::CUdeviceptr_v1, + psize: *mut ::core::ffi::c_uint, + dptr: cuda_types::CUdeviceptr_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pbase), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pbase, "cuMemGetAddressRange", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(psize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&psize, "cuMemGetAddressRange", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemGetAddressRange", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocHost( + writer: &mut (impl std::io::Write + ?Sized), + pp: *mut *mut ::core::ffi::c_void, + bytesize: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pp), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pp, "cuMemAllocHost", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemAllocHost", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemHostGetDevicePointer( + writer: &mut (impl std::io::Write + ?Sized), + pdptr: *mut cuda_types::CUdeviceptr_v1, + p: *mut ::core::ffi::c_void, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pdptr), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &pdptr, + "cuMemHostGetDevicePointer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(p), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&p, "cuMemHostGetDevicePointer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuMemHostGetDevicePointer", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoD( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyHtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcHost, "cuMemcpyHtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyHtoD", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoH( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstHost, "cuMemcpyDtoH", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoH", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoH", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoD( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyDtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoD", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoA( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyDtoA", arg_idx, writer)?; + arg_idx 
+= 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyDtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoA", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoD( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyAtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, "cuMemcpyAtoD", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoD", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoA( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyHtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyHtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcHost, "cuMemcpyHtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyHtoA", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoH( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstHost, "cuMemcpyAtoH", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoH", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, 
"cuMemcpyAtoH", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoH", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoA( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyAtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyAtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, "cuMemcpyAtoA", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoA", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoAAsync( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: ::core::ffi::c_uint, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyHtoAAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyHtoAAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcHost, "cuMemcpyHtoAAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyHtoAAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyHtoAAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoHAsync( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: ::core::ffi::c_uint, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstHost, "cuMemcpyAtoHAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoHAsync", arg_idx, 
writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, "cuMemcpyAtoHAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoHAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyAtoHAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2D( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy2D", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2DUnaligned( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy2DUnaligned", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3D( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3D", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoDAsync( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + srcHost: *const ::core::ffi::c_void, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyHtoDAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcHost, "cuMemcpyHtoDAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyHtoDAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyHtoDAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoHAsync( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstHost, "cuMemcpyDtoHAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoHAsync", arg_idx, writer)?; + arg_idx += 1; + 
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoHAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyDtoHAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoDAsync( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + srcDevice: cuda_types::CUdeviceptr_v1, + ByteCount: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyDtoDAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoDAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoDAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyDtoDAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2DAsync( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D_v1, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy2DAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpy2DAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DAsync( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D_v1, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3DAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpy3DAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD8( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + uc: ::core::ffi::c_uchar, + N: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD8", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD8", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD8", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD16( + 
writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + us: ::core::ffi::c_ushort, + N: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD16", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD16", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD16", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD32( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + ui: ::core::ffi::c_uint, + N: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD32", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D8( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + dstPitch: ::core::ffi::c_uint, + uc: ::core::ffi::c_uchar, + Width: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D8", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D8", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD2D8", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D8", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D8", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D16( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + dstPitch: ::core::ffi::c_uint, + us: ::core::ffi::c_ushort, + Width: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D16", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D16", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD2D16", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D16", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D16", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D32( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr_v1, + dstPitch: ::core::ffi::c_uint, + ui: ::core::ffi::c_uint, + Width: ::core::ffi::c_uint, + Height: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD2D32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D32", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuArrayCreate( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pHandle, "cuArrayCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pAllocateArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pAllocateArray, + "cuArrayCreate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArrayGetDescriptor( + writer: &mut (impl std::io::Write + ?Sized), + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY_DESCRIPTOR_v1, + hArray: cuda_types::CUarray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pArrayDescriptor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pArrayDescriptor, + "cuArrayGetDescriptor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hArray, "cuArrayGetDescriptor", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuArray3DCreate( + writer: &mut (impl std::io::Write + ?Sized), + pHandle: *mut cuda_types::CUarray, + pAllocateArray: *const cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pHandle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pHandle, "cuArray3DCreate", arg_idx, 
writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pAllocateArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pAllocateArray, + "cuArray3DCreate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuArray3DGetDescriptor( + writer: &mut (impl std::io::Write + ?Sized), + pArrayDescriptor: *mut cuda_types::CUDA_ARRAY3D_DESCRIPTOR_v1, + hArray: cuda_types::CUarray, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pArrayDescriptor), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pArrayDescriptor, + "cuArray3DGetDescriptor", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hArray, + "cuArray3DGetDescriptor", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetAddress( + writer: &mut (impl std::io::Write + ?Sized), + ByteOffset: *mut ::core::ffi::c_uint, + hTexRef: cuda_types::CUtexref, + dptr: cuda_types::CUdeviceptr_v1, + bytes: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ByteOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteOffset, + "cuTexRefSetAddress", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefSetAddress", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuTexRefSetAddress", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytes, "cuTexRefSetAddress", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefSetAddress2D( + writer: &mut (impl std::io::Write + ?Sized), + hTexRef: cuda_types::CUtexref, + desc: *const cuda_types::CUDA_ARRAY_DESCRIPTOR_v1, + dptr: cuda_types::CUdeviceptr_v1, + Pitch: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hTexRef, + "cuTexRefSetAddress2D", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(desc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&desc, "cuTexRefSetAddress2D", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuTexRefSetAddress2D", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Pitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Pitch, "cuTexRefSetAddress2D", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuTexRefGetAddress( + writer: &mut (impl std::io::Write + ?Sized), + pdptr: *mut cuda_types::CUdeviceptr_v1, + hTexRef: cuda_types::CUtexref, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pdptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pdptr, "cuTexRefGetAddress", arg_idx, writer)?; + arg_idx += 
1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hTexRef), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hTexRef, "cuTexRefGetAddress", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphicsResourceGetMappedPointer( + writer: &mut (impl std::io::Write + ?Sized), + pDevPtr: *mut cuda_types::CUdeviceptr_v1, + pSize: *mut ::core::ffi::c_uint, + resource: cuda_types::CUgraphicsResource, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pDevPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pDevPtr, + "cuGraphicsResourceGetMappedPointer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pSize, + "cuGraphicsResourceGetMappedPointer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsResourceGetMappedPointer", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuCtxDestroy( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxPopCurrent( + writer: &mut (impl std::io::Write + ?Sized), + pctx: *mut cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuCtxPopCurrent", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuCtxPushCurrent( + writer: &mut (impl std::io::Write + ?Sized), + ctx: cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(ctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ctx, "cuCtxPushCurrent", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventDestroy( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuEventDestroy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxRelease( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxRelease", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxReset( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, +) -> std::io::Result<()> 
{ + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dev, "cuDevicePrimaryCtxReset", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuDevicePrimaryCtxSetFlags( + writer: &mut (impl std::io::Write + ?Sized), + dev: cuda_types::CUdevice, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dev), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dev, + "cuDevicePrimaryCtxSetFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuDevicePrimaryCtxSetFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoD_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyHtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcHost, "cuMemcpyHtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyHtoD_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoH_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstHost, "cuMemcpyDtoH_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoH_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoH_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoD_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyDtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoD_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoA_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcDevice: 
cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyDtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyDtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyDtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyDtoA_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoD_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyAtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, "cuMemcpyAtoD_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoD_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoA_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyHtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyHtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcHost, "cuMemcpyHtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyHtoA_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoH_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstHost, "cuMemcpyAtoH_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoH_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, "cuMemcpyAtoH_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoH_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoA_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstArray, "cuMemcpyAtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstOffset, "cuMemcpyAtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcArray, "cuMemcpyAtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcOffset, "cuMemcpyAtoA_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAtoA_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoAAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstArray: cuda_types::CUarray, + dstOffset: usize, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstArray, + "cuMemcpyHtoAAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstOffset, + "cuMemcpyHtoAAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcHost, + "cuMemcpyHtoAAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyHtoAAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyHtoAAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAtoHAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcArray: cuda_types::CUarray, + srcOffset: usize, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &dstHost, + "cuMemcpyAtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcArray, + "cuMemcpyAtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcOffset), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcOffset, + "cuMemcpyAtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyAtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyAtoHAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2D_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy2D_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2DUnaligned_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCopy, + "cuMemcpy2DUnaligned_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3D_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3D_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyHtoDAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcHost: *const ::core::ffi::c_void, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyHtoDAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcHost, + "cuMemcpyHtoDAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyHtoDAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyHtoDAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoHAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstHost: *mut ::core::ffi::c_void, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx 
= 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstHost), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstHost, + "cuMemcpyDtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoHAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyDtoHAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpyDtoDAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + srcDevice: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemcpyDtoDAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcDevice, + "cuMemcpyDtoDAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &ByteCount, + "cuMemcpyDtoDAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemcpyDtoDAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemcpy2DAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY2D, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy2DAsync_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpy2DAsync_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3DAsync_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpy3DAsync_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD8_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), 
": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD8_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD8_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD8_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD16_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD16_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD16_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD16_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD32_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD32_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D8_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D8_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D8_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD2D8_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D8_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D8_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D16_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, +) -> std::io::Result<()> { + let mut arg_idx = 
0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D16_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D16_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD2D16_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D16_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D16_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D32_v2( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD2D32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D32_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy( + writer: &mut (impl std::io::Write + ?Sized), + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dst, "cuMemcpy", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(src), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&src, "cuMemcpy", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpy", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyAsync( + writer: &mut (impl std::io::Write + ?Sized), + dst: cuda_types::CUdeviceptr, + src: cuda_types::CUdeviceptr, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dst), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dst, "cuMemcpyAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(src), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write(&src, "cuMemcpyAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyPeer( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyPeer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstContext, "cuMemcpyPeer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyPeer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcContext, "cuMemcpyPeer", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyPeer", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpyPeerAsync( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstContext: cuda_types::CUcontext, + srcDevice: cuda_types::CUdeviceptr, + srcContext: cuda_types::CUcontext, + ByteCount: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemcpyPeerAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstContext, + "cuMemcpyPeerAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&srcDevice, "cuMemcpyPeerAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcContext), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcContext, + "cuMemcpyPeerAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ByteCount), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ByteCount, "cuMemcpyPeerAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpyPeerAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DPeer( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, +) -> std::io::Result<()> { + let mut 
arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3DPeer", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemcpy3DPeerAsync( + writer: &mut (impl std::io::Write + ?Sized), + pCopy: *const cuda_types::CUDA_MEMCPY3D_PEER, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCopy), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCopy, "cuMemcpy3DPeerAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemcpy3DPeerAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD8Async( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + uc: ::core::ffi::c_uchar, + N: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemsetD8Async", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD16Async( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + us: ::core::ffi::c_ushort, + N: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemsetD16Async", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD32Async( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + ui: ::core::ffi::c_uint, + N: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", 
")?; + writer.write_all(concat!(stringify!(N), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&N, "cuMemsetD32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemsetD32Async", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D8Async( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + uc: ::core::ffi::c_uchar, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstDevice, "cuMemsetD2D8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(uc), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&uc, "cuMemsetD2D8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D8Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemsetD2D8Async", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D16Async( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + us: ::core::ffi::c_ushort, + Width: usize, + Height: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D16Async", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(us), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&us, "cuMemsetD2D16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D16Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemsetD2D16Async", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemsetD2D32Async( + writer: &mut (impl std::io::Write + ?Sized), + dstDevice: cuda_types::CUdeviceptr, + dstPitch: usize, + ui: ::core::ffi::c_uint, + Width: usize, + Height: usize, + hStream: 
cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemsetD2D32Async", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstPitch), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dstPitch, "cuMemsetD2D32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(ui), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&ui, "cuMemsetD2D32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Width), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Width, "cuMemsetD2D32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Height), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Height, "cuMemsetD2D32Async", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemsetD2D32Async", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamGetPriority( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + priority: *mut ::core::ffi::c_int, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamGetPriority", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(priority), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &priority, + "cuStreamGetPriority", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetId( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + streamId: *mut ::core::ffi::c_ulonglong, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamGetId", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(streamId), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&streamId, "cuStreamGetId", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamGetFlags( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + flags: *mut ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamGetFlags", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamGetFlags", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCtx( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + pctx: *mut cuda_types::CUcontext, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamGetCtx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", 
")?; + writer.write_all(concat!(stringify!(pctx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pctx, "cuStreamGetCtx", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitEvent( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + hEvent: cuda_types::CUevent, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamWaitEvent", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuStreamWaitEvent", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuStreamWaitEvent", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamAddCallback( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + callback: cuda_types::CUstreamCallback, + userData: *mut ::core::ffi::c_void, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamAddCallback", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(callback), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &callback, + "cuStreamAddCallback", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &userData, + "cuStreamAddCallback", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamAddCallback", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamAttachMemAsync( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + dptr: cuda_types::CUdeviceptr, + length: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamAttachMemAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuStreamAttachMemAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(length), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &length, + "cuStreamAttachMemAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamAttachMemAsync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamQuery( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamQuery", 
arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamSynchronize( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamSynchronize", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventRecord( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hEvent, "cuEventRecord", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuEventRecord", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuEventRecordWithFlags( + writer: &mut (impl std::io::Write + ?Sized), + hEvent: cuda_types::CUevent, + hStream: cuda_types::CUstream, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hEvent, + "cuEventRecordWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuEventRecordWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuEventRecordWithFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLaunchKernel( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&gridDimX, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&gridDimY, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&gridDimZ, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&blockDimX, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&blockDimY, 
"cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&blockDimZ, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &sharedMemBytes, + "cuLaunchKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&kernelParams, "cuLaunchKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&extra, "cuLaunchKernel", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchKernelEx( + writer: &mut (impl std::io::Write + ?Sized), + config: *const cuda_types::CUlaunchConfig, + f: cuda_types::CUfunction, + kernelParams: *mut *mut ::core::ffi::c_void, + extra: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(config), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&config, "cuLaunchKernelEx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchKernelEx", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernelParams, + "cuLaunchKernelEx", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(extra), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&extra, "cuLaunchKernelEx", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuLaunchHostFunc( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + fn_: cuda_types::CUhostFn, + userData: *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuLaunchHostFunc", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(fn_), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&fn_, "cuLaunchHostFunc", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(userData), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&userData, "cuLaunchHostFunc", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphicsMapResources( + writer: &mut (impl std::io::Write + ?Sized), + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuGraphicsMapResources", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resources), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &resources, + "cuGraphicsMapResources", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGraphicsMapResources", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsUnmapResources( + writer: &mut (impl std::io::Write + ?Sized), + count: ::core::ffi::c_uint, + resources: *mut cuda_types::CUgraphicsResource, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuGraphicsUnmapResources", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resources), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resources, + "cuGraphicsUnmapResources", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGraphicsUnmapResources", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue32( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stream, "cuStreamWriteValue32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuStreamWriteValue32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuStreamWriteValue32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamWriteValue32", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue32( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stream, "cuStreamWaitValue32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuStreamWaitValue32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuStreamWaitValue32", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamWaitValue32", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue64( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: 
cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stream, "cuStreamWriteValue64", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuStreamWriteValue64", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuStreamWriteValue64", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamWriteValue64", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue64( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stream, "cuStreamWaitValue64", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuStreamWaitValue64", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&value, "cuStreamWaitValue64", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamWaitValue64", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamBatchMemOp( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&stream, "cuStreamBatchMemOp", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuStreamBatchMemOp", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramArray), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..count { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write( + unsafe { &*paramArray.add(i as usize) }, + "cuStreamBatchMemOp", + arg_idx, + writer, + )?; + } + writer.write_all(b"]")?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamBatchMemOp", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue32_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWriteValue32_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWriteValue32_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWriteValue32_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWriteValue32_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue32_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWaitValue32_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWaitValue32_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWaitValue32_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWaitValue32_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue64_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWriteValue64_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWriteValue64_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWriteValue64_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWriteValue64_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue64_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + 
&stream, + "cuStreamWaitValue64_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWaitValue64_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWaitValue64_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWaitValue64_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBatchMemOp_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamBatchMemOp_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &count, + "cuStreamBatchMemOp_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramArray), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..count { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write( + unsafe { &*paramArray.add(i as usize) }, + "cuStreamBatchMemOp_ptsz", + arg_idx, + writer, + )?; + } + writer.write_all(b"]")?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamBatchMemOp_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue32_v2( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWriteValue32_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWriteValue32_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWriteValue32_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWriteValue32_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue32_v2( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint32_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &stream, + "cuStreamWaitValue32_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuStreamWaitValue32_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWaitValue32_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWaitValue32_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWriteValue64_v2( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWriteValue64_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &addr, + "cuStreamWriteValue64_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWriteValue64_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWriteValue64_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamWaitValue64_v2( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + addr: cuda_types::CUdeviceptr, + value: cuda_types::cuuint64_t, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamWaitValue64_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(addr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&addr, "cuStreamWaitValue64_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(value), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &value, + "cuStreamWaitValue64_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamWaitValue64_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBatchMemOp_v2( + writer: &mut (impl std::io::Write + ?Sized), + stream: cuda_types::CUstream, + count: ::core::ffi::c_uint, + paramArray: *mut cuda_types::CUstreamBatchMemOpParams, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuStreamBatchMemOp_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + 
writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuStreamBatchMemOp_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramArray), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..count { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write( + unsafe { &*paramArray.add(i as usize) }, + "cuStreamBatchMemOp_v2", + arg_idx, + writer, + )?; + } + writer.write_all(b"]")?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuStreamBatchMemOp_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPrefetchAsync( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: cuda_types::CUdeviceptr, + count: usize, + dstDevice: cuda_types::CUdevice, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&devPtr, "cuMemPrefetchAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemPrefetchAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dstDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstDevice, + "cuMemPrefetchAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemPrefetchAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemPrefetchAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + devPtr: cuda_types::CUdeviceptr, + count: usize, + location: cuda_types::CUmemLocation, + flags: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(devPtr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &devPtr, + "cuMemPrefetchAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemPrefetchAsync_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(location), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &location, + "cuMemPrefetchAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuMemPrefetchAsync_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemPrefetchAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuLaunchCooperativeKernel( + writer: &mut (impl std::io::Write + ?Sized), + f: cuda_types::CUfunction, + gridDimX: ::core::ffi::c_uint, + gridDimY: ::core::ffi::c_uint, + gridDimZ: ::core::ffi::c_uint, + blockDimX: ::core::ffi::c_uint, + blockDimY: ::core::ffi::c_uint, + blockDimZ: ::core::ffi::c_uint, + sharedMemBytes: ::core::ffi::c_uint, + hStream: 
cuda_types::CUstream, + kernelParams: *mut *mut ::core::ffi::c_void, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(f), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&f, "cuLaunchCooperativeKernel", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimX, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimY, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(gridDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &gridDimZ, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimX), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimX, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimY), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimY, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(blockDimZ), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &blockDimZ, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(sharedMemBytes), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &sharedMemBytes, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(kernelParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &kernelParams, + "cuLaunchCooperativeKernel", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuSignalExternalSemaphoresAsync( + writer: &mut (impl std::io::Write + ?Sized), + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extSemArray, + "cuSignalExternalSemaphoresAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &paramsArray, + "cuSignalExternalSemaphoresAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numExtSems, + "cuSignalExternalSemaphoresAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, +
"cuSignalExternalSemaphoresAsync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuWaitExternalSemaphoresAsync( + writer: &mut (impl std::io::Write + ?Sized), + extSemArray: *const cuda_types::CUexternalSemaphore, + paramsArray: *const cuda_types::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, + numExtSems: ::core::ffi::c_uint, + stream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(extSemArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &extSemArray, + "cuWaitExternalSemaphoresAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(paramsArray), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + ¶msArray, + "cuWaitExternalSemaphoresAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numExtSems), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numExtSems, + "cuWaitExternalSemaphoresAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuWaitExternalSemaphoresAsync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBeginCapture( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamBeginCapture", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBeginCapture_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamBeginCapture_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBeginCapture_v2( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + mode: cuda_types::CUstreamCaptureMode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamBeginCapture_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mode, + "cuStreamBeginCapture_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamBeginCaptureToGraph( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + mode: cuda_types::CUstreamCaptureMode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamBeginCaptureToGraph", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + 
"cuStreamBeginCaptureToGraph", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuStreamBeginCaptureToGraph", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencyData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencyData, + "cuStreamBeginCaptureToGraph", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuStreamBeginCaptureToGraph", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mode, + "cuStreamBeginCaptureToGraph", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamEndCapture( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + phGraph: *mut cuda_types::CUgraph, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamEndCapture", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(phGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&phGraph, "cuStreamEndCapture", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamIsCapturing( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus: *mut cuda_types::CUstreamCaptureStatus, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuStreamIsCapturing", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus, + "cuStreamIsCapturing", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCaptureInfo( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetCaptureInfo", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus_out, + "cuStreamGetCaptureInfo", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(id_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &id_out, + "cuStreamGetCaptureInfo", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCaptureInfo_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetCaptureInfo_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus_out, + "cuStreamGetCaptureInfo_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(id_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &id_out, + "cuStreamGetCaptureInfo_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCaptureInfo_v2( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + numDependencies_out: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetCaptureInfo_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus_out, + "cuStreamGetCaptureInfo_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(id_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &id_out, + "cuStreamGetCaptureInfo_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(graph_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &graph_out, + "cuStreamGetCaptureInfo_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies_out, + "cuStreamGetCaptureInfo_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies_out, + "cuStreamGetCaptureInfo_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamGetCaptureInfo_v3( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + captureStatus_out: *mut cuda_types::CUstreamCaptureStatus, + id_out: *mut cuda_types::cuuint64_t, + graph_out: *mut cuda_types::CUgraph, + dependencies_out: *mut *const cuda_types::CUgraphNode, + edgeData_out: *mut *const cuda_types::CUgraphEdgeData, + numDependencies_out: *mut usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(captureStatus_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &captureStatus_out, + "cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(id_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &id_out, + 
"cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(graph_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &graph_out, + "cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies_out, + "cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(edgeData_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &edgeData_out, + "cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies_out, + "cuStreamGetCaptureInfo_v3", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphAddKernelNode( + writer: &mut (impl std::io::Write + ?Sized), + phGraphNode: *mut cuda_types::CUgraphNode, + hGraph: cuda_types::CUgraph, + dependencies: *const cuda_types::CUgraphNode, + numDependencies: usize, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphNode, + "cuGraphAddKernelNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphAddKernelNode", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuGraphAddKernelNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuGraphAddKernelNode", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphAddKernelNode", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphKernelNodeGetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *mut cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphKernelNodeGetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphKernelNodeGetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphKernelNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + 
"cuGraphKernelNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphKernelNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecKernelNodeSetParams( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hNode: cuda_types::CUgraphNode, + nodeParams: *const cuda_types::CUDA_KERNEL_NODE_PARAMS_v1, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecKernelNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hNode, + "cuGraphExecKernelNodeSetParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(nodeParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &nodeParams, + "cuGraphExecKernelNodeSetParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphInstantiateWithParams( + writer: &mut (impl std::io::Write + ?Sized), + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + instantiateParams: *mut cuda_types::CUDA_GRAPH_INSTANTIATE_PARAMS, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphExec, + "cuGraphInstantiateWithParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphInstantiateWithParams", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(instantiateParams), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &instantiateParams, + "cuGraphInstantiateWithParams", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphExecUpdate( + writer: &mut (impl std::io::Write + ?Sized), + hGraphExec: cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + hErrorNode_out: *mut cuda_types::CUgraphNode, + updateResult_out: *mut cuda_types::CUgraphExecUpdateResult, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraphExec, + "cuGraphExecUpdate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphExecUpdate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hErrorNode_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hErrorNode_out, + "cuGraphExecUpdate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(updateResult_out), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &updateResult_out, + "cuGraphExecUpdate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphUpload( + writer: &mut (impl std::io::Write + ?Sized), + 
hGraph: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphUpload", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuGraphUpload", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphLaunch( + writer: &mut (impl std::io::Write + ?Sized), + hGraph: cuda_types::CUgraphExec, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphLaunch", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuGraphLaunch", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuStreamCopyAttributes( + writer: &mut (impl std::io::Write + ?Sized), + dstStream: cuda_types::CUstream, + srcStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dstStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dstStream, + "cuStreamCopyAttributes", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(srcStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &srcStream, + "cuStreamCopyAttributes", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuIpcOpenMemHandle( + writer: &mut (impl std::io::Write + ?Sized), + pdptr: *mut cuda_types::CUdeviceptr, + handle: cuda_types::CUipcMemHandle, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pdptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pdptr, "cuIpcOpenMemHandle", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(handle), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&handle, "cuIpcOpenMemHandle", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuIpcOpenMemHandle", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGraphInstantiate( + writer: &mut (impl std::io::Write + ?Sized), + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + phErrorNode: *mut cuda_types::CUgraphNode, + logBuffer: *mut ::core::ffi::c_char, + bufferSize: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphExec, + "cuGraphInstantiate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hGraph, "cuGraphInstantiate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(phErrorNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phErrorNode, + "cuGraphInstantiate", + arg_idx, + writer, + 
)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(logBuffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &logBuffer, + "cuGraphInstantiate", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bufferSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bufferSize, + "cuGraphInstantiate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphInstantiate_v2( + writer: &mut (impl std::io::Write + ?Sized), + phGraphExec: *mut cuda_types::CUgraphExec, + hGraph: cuda_types::CUgraph, + phErrorNode: *mut cuda_types::CUgraphNode, + logBuffer: *mut ::core::ffi::c_char, + bufferSize: usize, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phGraphExec), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phGraphExec, + "cuGraphInstantiate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hGraph), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hGraph, + "cuGraphInstantiate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(phErrorNode), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phErrorNode, + "cuGraphInstantiate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(logBuffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &logBuffer, + "cuGraphInstantiate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bufferSize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bufferSize, + "cuGraphInstantiate_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuMemMapArrayAsync( + writer: &mut (impl std::io::Write + ?Sized), + mapInfoList: *mut cuda_types::CUarrayMapInfo, + count: ::core::ffi::c_uint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(mapInfoList), ": ").as_bytes())?; + writer.write_all(b"[")?; + for i in 0..count { + if i != 0 { + writer.write_all(b", ")?; + } + crate::format::CudaDisplay::write( + unsafe { &*mapInfoList.add(i as usize) }, + "cuMemMapArrayAsync", + arg_idx, + writer, + )?; + } + writer.write_all(b"]")?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(count), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&count, "cuMemMapArrayAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemMapArrayAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemFreeAsync( + writer: &mut (impl std::io::Write + ?Sized), + dptr: cuda_types::CUdeviceptr, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemFreeAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemFreeAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocAsync( + 
writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuMemAllocAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&bytesize, "cuMemAllocAsync", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&hStream, "cuMemAllocAsync", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuMemAllocFromPoolAsync( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + bytesize: usize, + pool: cuda_types::CUmemoryPool, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuMemAllocFromPoolAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(bytesize), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &bytesize, + "cuMemAllocFromPoolAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pool), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pool, + "cuMemAllocFromPoolAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuMemAllocFromPoolAsync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamUpdateCaptureDependencies( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + numDependencies: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamUpdateCaptureDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuStreamUpdateCaptureDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuStreamUpdateCaptureDependencies", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamUpdateCaptureDependencies", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuStreamUpdateCaptureDependencies_v2( + writer: &mut (impl std::io::Write + ?Sized), + hStream: cuda_types::CUstream, + dependencies: *mut cuda_types::CUgraphNode, + dependencyData: *const cuda_types::CUgraphEdgeData, + numDependencies: usize, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + 
writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuStreamUpdateCaptureDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencies, + "cuStreamUpdateCaptureDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(dependencyData), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dependencyData, + "cuStreamUpdateCaptureDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(numDependencies), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &numDependencies, + "cuStreamUpdateCaptureDependencies_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuStreamUpdateCaptureDependencies_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGetProcAddress( + writer: &mut (impl std::io::Write + ?Sized), + symbol: *const ::core::ffi::c_char, + pfn: *mut *mut ::core::ffi::c_void, + cudaVersion: ::core::ffi::c_int, + flags: cuda_types::cuuint64_t, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(symbol), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&symbol, "cuGetProcAddress", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pfn), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pfn, "cuGetProcAddress", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(cudaVersion), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &cudaVersion, + "cuGetProcAddress", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuGetProcAddress", arg_idx, writer)?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUoutput_mode_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUoutput_mode_enum::CU_OUT_KEY_VALUE_PAIR => { + writer.write_all(stringify!(CU_OUT_KEY_VALUE_PAIR).as_bytes()) + } + &cuda_types::CUoutput_mode_enum::CU_OUT_CSV => { + writer.write_all(stringify!(CU_OUT_CSV).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +pub fn write_cuProfilerInitialize( + writer: &mut (impl std::io::Write + ?Sized), + configFile: *const ::core::ffi::c_char, + outputFile: *const ::core::ffi::c_char, + outputMode: cuda_types::CUoutput_mode, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(configFile), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &configFile, + "cuProfilerInitialize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(outputFile), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &outputFile, + "cuProfilerInitialize", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(outputMode), ": 
").as_bytes())?; + crate::format::CudaDisplay::write( + &outputMode, + "cuProfilerInitialize", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuProfilerStart( + writer: &mut (impl std::io::Write + ?Sized), +) -> std::io::Result<()> { + writer.write_all(b"()") +} +pub fn write_cuProfilerStop( + writer: &mut (impl std::io::Write + ?Sized), +) -> std::io::Result<()> { + writer.write_all(b"()") +} +pub fn write_cuGraphicsGLRegisterBuffer( + writer: &mut (impl std::io::Write + ?Sized), + pCudaResource: *mut cuda_types::CUgraphicsResource, + buffer: cuda_types::GLuint, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCudaResource, + "cuGraphicsGLRegisterBuffer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGraphicsGLRegisterBuffer", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuGraphicsGLRegisterBuffer", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsGLRegisterImage( + writer: &mut (impl std::io::Write + ?Sized), + pCudaResource: *mut cuda_types::CUgraphicsResource, + image: cuda_types::GLuint, + target: cuda_types::GLenum, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCudaResource, + "cuGraphicsGLRegisterImage", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(image), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &image, + "cuGraphicsGLRegisterImage", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(target), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &target, + "cuGraphicsGLRegisterImage", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuGraphicsGLRegisterImage", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUGLDeviceList_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_ALL => { + writer.write_all(stringify!(CU_GL_DEVICE_LIST_ALL).as_bytes()) + } + &cuda_types::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_CURRENT_FRAME => { + writer.write_all(stringify!(CU_GL_DEVICE_LIST_CURRENT_FRAME).as_bytes()) + } + &cuda_types::CUGLDeviceList_enum::CU_GL_DEVICE_LIST_NEXT_FRAME => { + writer.write_all(stringify!(CU_GL_DEVICE_LIST_NEXT_FRAME).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUGLmap_flags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_NONE => { + 
writer.write_all(stringify!(CU_GL_MAP_RESOURCE_FLAGS_NONE).as_bytes()) + } + &cuda_types::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY => { + writer + .write_all(stringify!(CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY).as_bytes()) + } + &cuda_types::CUGLmap_flags_enum::CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD => { + writer + .write_all( + stringify!(CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD).as_bytes(), + ) + } + _ => write!(writer, "{}", self.0), + } + } +} +pub fn write_cuGLCtxCreate_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCtx: *mut cuda_types::CUcontext, + Flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCtx, "cuGLCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuGLCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuGLCtxCreate_v2", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGLInit( + writer: &mut (impl std::io::Write + ?Sized), +) -> std::io::Result<()> { + writer.write_all(b"()") +} +pub fn write_cuGLRegisterBufferObject( + writer: &mut (impl std::io::Write + ?Sized), + buffer: cuda_types::GLuint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLRegisterBufferObject", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLMapBufferObject_v2_ptds( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuGLMapBufferObject_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuGLMapBufferObject_v2_ptds", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLMapBufferObject_v2_ptds", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLUnmapBufferObject( + writer: &mut (impl std::io::Write + ?Sized), + buffer: cuda_types::GLuint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLUnmapBufferObject", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLUnregisterBufferObject( + writer: &mut (impl std::io::Write + ?Sized), + buffer: cuda_types::GLuint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLUnregisterBufferObject", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLSetBufferObjectMapFlags( + writer: 
&mut (impl std::io::Write + ?Sized), + buffer: cuda_types::GLuint, + Flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLSetBufferObjectMapFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &Flags, + "cuGLSetBufferObjectMapFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLMapBufferObjectAsync_v2_ptsz( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuGLMapBufferObjectAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuGLMapBufferObjectAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLMapBufferObjectAsync_v2_ptsz", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGLMapBufferObjectAsync_v2_ptsz", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLUnmapBufferObjectAsync( + writer: &mut (impl std::io::Write + ?Sized), + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLUnmapBufferObjectAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGLUnmapBufferObjectAsync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLMapBufferObject_v2( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuGLMapBufferObject_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuGLMapBufferObject_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLMapBufferObject_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLMapBufferObjectAsync_v2( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr, + size: *mut usize, + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + 
writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuGLMapBufferObjectAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuGLMapBufferObjectAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLMapBufferObjectAsync_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &hStream, + "cuGLMapBufferObjectAsync_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGLCtxCreate( + writer: &mut (impl std::io::Write + ?Sized), + pCtx: *mut cuda_types::CUcontext, + Flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCtx, "cuGLCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(Flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&Flags, "cuGLCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuGLCtxCreate", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGLMapBufferObject( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr_v1, + size: *mut ::core::ffi::c_uint, + buffer: cuda_types::GLuint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&dptr, "cuGLMapBufferObject", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&size, "cuGLMapBufferObject", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&buffer, "cuGLMapBufferObject", arg_idx, writer)?; + writer.write_all(b")") +} +pub fn write_cuGLMapBufferObjectAsync( + writer: &mut (impl std::io::Write + ?Sized), + dptr: *mut cuda_types::CUdeviceptr_v1, + size: *mut ::core::ffi::c_uint, + buffer: cuda_types::GLuint, + hStream: cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(dptr), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &dptr, + "cuGLMapBufferObjectAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(size), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &size, + "cuGLMapBufferObjectAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(buffer), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &buffer, + "cuGLMapBufferObjectAsync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(hStream), ": ").as_bytes())?; + 
crate::format::CudaDisplay::write( + &hStream, + "cuGLMapBufferObjectAsync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUeglFrameType_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUeglFrameType_enum::CU_EGL_FRAME_TYPE_ARRAY => { + writer.write_all(stringify!(CU_EGL_FRAME_TYPE_ARRAY).as_bytes()) + } + &cuda_types::CUeglFrameType_enum::CU_EGL_FRAME_TYPE_PITCH => { + writer.write_all(stringify!(CU_EGL_FRAME_TYPE_PITCH).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUeglResourceLocationFlags_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUeglResourceLocationFlags_enum::CU_EGL_RESOURCE_LOCATION_SYSMEM => { + writer.write_all(stringify!(CU_EGL_RESOURCE_LOCATION_SYSMEM).as_bytes()) + } + &cuda_types::CUeglResourceLocationFlags_enum::CU_EGL_RESOURCE_LOCATION_VIDMEM => { + writer.write_all(stringify!(CU_EGL_RESOURCE_LOCATION_VIDMEM).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUeglColorFormat_enum { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_PLANAR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV422_PLANAR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_RGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_ARGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_ARGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RGBA => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_RGBA).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_L => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_L).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_R => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_R).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_PLANAR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV444_PLANAR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUYV_422 => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUYV_422).as_bytes()) + } + 
&cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_UYVY_422 => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_UYVY_422).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_ABGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_ABGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BGRA => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BGRA).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_A => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_A).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_RG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_RG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_AYUV => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_AYUV).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_VYUY_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_VYUY_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_UYVY_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_UYVY_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUYV_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUYV_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVYU_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVYU_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUVA_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUVA_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_AYUV_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_AYUV_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER).as_bytes(), + ) + } + 
&cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_RGGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_RGGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_BGGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_BGGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_GRBG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_GRBG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_GBRG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_GBRG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_RGGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_RGGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_BGGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_BGGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_GRBG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_GRBG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_GBRG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_GBRG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_RGGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_RGGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_BGGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_BGGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_GRBG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_GRBG).as_bytes()) + } + 
&cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_GBRG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_GBRG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_RGGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_RGGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_BGGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_BGGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_GRBG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_GRBG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER14_GBRG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER14_GBRG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_RGGB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_RGGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_BGGR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_BGGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_GRBG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_GRBG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER20_GBRG => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER20_GBRG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU444_PLANAR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_YVU444_PLANAR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU422_PLANAR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_YVU422_PLANAR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG => { + writer + .write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_BCCR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_BCCR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_RCCB => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_RCCB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_CRBC => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_CRBC).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER_CBRC => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER_CBRC).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER10_CCCC => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER10_CCCC).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_BCCR => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_BCCR).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_RCCB => { + 
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_RCCB).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CRBC => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_CRBC).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CBRC => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_CBRC).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_BAYER12_CCCC => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_BAYER12_CCCC).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_2020).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_2020).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR_2020).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR_2020).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_709).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_709).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YUV420_PLANAR_709).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_YVU420_PLANAR_709).as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_2020) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_2020) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709 => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_422_SEMIPLANAR_709) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y_709_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y_709_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10_ER => { + 
writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y10_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10_709_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y10_709_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y12_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12_709_ER => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_Y12_709_ER).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUVA => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUVA).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YUV => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YUV).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_YVYU => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_YVYU).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_VYUY => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_VYUY).as_bytes()) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR_709_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR_709_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR_709_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER => { + writer + .write_all( + stringify!(CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR_709_ER) + .as_bytes(), + ) + } + &cuda_types::CUeglColorFormat_enum::CU_EGL_COLOR_FORMAT_MAX => { + writer.write_all(stringify!(CU_EGL_COLOR_FORMAT_MAX).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::CUeglStreamConnection { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!(writer, "{:p}", *self) + } +} +pub fn write_cuGraphicsEGLRegisterImage( + writer: &mut (impl std::io::Write + ?Sized), + pCudaResource: *mut cuda_types::CUgraphicsResource, + image: cuda_types::EGLImageKHR, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + 
&pCudaResource, + "cuGraphicsEGLRegisterImage", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(image), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &image, + "cuGraphicsEGLRegisterImage", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphicsEGLRegisterImage", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamConsumerConnect( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + stream: cuda_types::EGLStreamKHR, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamConsumerConnect", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuEGLStreamConsumerConnect", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamConsumerConnectWithFlags( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + stream: cuda_types::EGLStreamKHR, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamConsumerConnectWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuEGLStreamConsumerConnectWithFlags", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuEGLStreamConsumerConnectWithFlags", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamConsumerDisconnect( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamConsumerDisconnect", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamConsumerAcquireFrame( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + pCudaResource: *mut cuda_types::CUgraphicsResource, + pStream: *mut cuda_types::CUstream, + timeout: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamConsumerAcquireFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCudaResource, + "cuEGLStreamConsumerAcquireFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pStream, + "cuEGLStreamConsumerAcquireFrame", + arg_idx, 
+ writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(timeout), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &timeout, + "cuEGLStreamConsumerAcquireFrame", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamConsumerReleaseFrame( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + pCudaResource: cuda_types::CUgraphicsResource, + pStream: *mut cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamConsumerReleaseFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCudaResource, + "cuEGLStreamConsumerReleaseFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pStream, + "cuEGLStreamConsumerReleaseFrame", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamProducerConnect( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + stream: cuda_types::EGLStreamKHR, + width: cuda_types::EGLint, + height: cuda_types::EGLint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamProducerConnect", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(stream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &stream, + "cuEGLStreamProducerConnect", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(width), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &width, + "cuEGLStreamProducerConnect", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(height), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &height, + "cuEGLStreamProducerConnect", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamProducerDisconnect( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamProducerDisconnect", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamProducerPresentFrame( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + eglframe: cuda_types::CUeglFrame, + pStream: *mut cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamProducerPresentFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(eglframe), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &eglframe, + "cuEGLStreamProducerPresentFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + 
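+    // (annotation added for readability, not produced by the code generator) each
+    // argument is written as `name: value` with ", " between them; arg_idx advances
+    // so the CudaDisplay impl knows which parameter of the traced call it is formatting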
writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pStream, + "cuEGLStreamProducerPresentFrame", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEGLStreamProducerReturnFrame( + writer: &mut (impl std::io::Write + ?Sized), + conn: *mut cuda_types::CUeglStreamConnection, + eglframe: *mut cuda_types::CUeglFrame, + pStream: *mut cuda_types::CUstream, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(conn), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &conn, + "cuEGLStreamProducerReturnFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(eglframe), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &eglframe, + "cuEGLStreamProducerReturnFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(pStream), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pStream, + "cuEGLStreamProducerReturnFrame", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsResourceGetMappedEglFrame( + writer: &mut (impl std::io::Write + ?Sized), + eglFrame: *mut cuda_types::CUeglFrame, + resource: cuda_types::CUgraphicsResource, + index: ::core::ffi::c_uint, + mipLevel: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(eglFrame), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &eglFrame, + "cuGraphicsResourceGetMappedEglFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(resource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &resource, + "cuGraphicsResourceGetMappedEglFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(index), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &index, + "cuGraphicsResourceGetMappedEglFrame", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(mipLevel), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &mipLevel, + "cuGraphicsResourceGetMappedEglFrame", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuEventCreateFromEGLSync( + writer: &mut (impl std::io::Write + ?Sized), + phEvent: *mut cuda_types::CUevent, + eglSync: cuda_types::EGLSyncKHR, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(phEvent), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &phEvent, + "cuEventCreateFromEGLSync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(eglSync), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &eglSync, + "cuEventCreateFromEGLSync", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuEventCreateFromEGLSync", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::VdpStatus { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + 
&cuda_types::VdpStatus::VDP_STATUS_OK => { + writer.write_all(stringify!(VDP_STATUS_OK).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_NO_IMPLEMENTATION => { + writer.write_all(stringify!(VDP_STATUS_NO_IMPLEMENTATION).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_DISPLAY_PREEMPTED => { + writer.write_all(stringify!(VDP_STATUS_DISPLAY_PREEMPTED).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_HANDLE => { + writer.write_all(stringify!(VDP_STATUS_INVALID_HANDLE).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_POINTER => { + writer.write_all(stringify!(VDP_STATUS_INVALID_POINTER).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_CHROMA_TYPE => { + writer.write_all(stringify!(VDP_STATUS_INVALID_CHROMA_TYPE).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_Y_CB_CR_FORMAT => { + writer + .write_all(stringify!(VDP_STATUS_INVALID_Y_CB_CR_FORMAT).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_RGBA_FORMAT => { + writer.write_all(stringify!(VDP_STATUS_INVALID_RGBA_FORMAT).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_INDEXED_FORMAT => { + writer + .write_all(stringify!(VDP_STATUS_INVALID_INDEXED_FORMAT).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_COLOR_STANDARD => { + writer + .write_all(stringify!(VDP_STATUS_INVALID_COLOR_STANDARD).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_COLOR_TABLE_FORMAT => { + writer + .write_all( + stringify!(VDP_STATUS_INVALID_COLOR_TABLE_FORMAT).as_bytes(), + ) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_BLEND_FACTOR => { + writer.write_all(stringify!(VDP_STATUS_INVALID_BLEND_FACTOR).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_BLEND_EQUATION => { + writer + .write_all(stringify!(VDP_STATUS_INVALID_BLEND_EQUATION).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_FLAG => { + writer.write_all(stringify!(VDP_STATUS_INVALID_FLAG).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_DECODER_PROFILE => { + writer + .write_all(stringify!(VDP_STATUS_INVALID_DECODER_PROFILE).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE => { + writer + .write_all( + stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_FEATURE).as_bytes(), + ) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER => { + writer + .write_all( + stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_PARAMETER).as_bytes(), + ) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE => { + writer + .write_all( + stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_ATTRIBUTE).as_bytes(), + ) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE => { + writer + .write_all( + stringify!(VDP_STATUS_INVALID_VIDEO_MIXER_PICTURE_STRUCTURE) + .as_bytes(), + ) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_FUNC_ID => { + writer.write_all(stringify!(VDP_STATUS_INVALID_FUNC_ID).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_SIZE => { + writer.write_all(stringify!(VDP_STATUS_INVALID_SIZE).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_VALUE => { + writer.write_all(stringify!(VDP_STATUS_INVALID_VALUE).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_INVALID_STRUCT_VERSION => { + writer + .write_all(stringify!(VDP_STATUS_INVALID_STRUCT_VERSION).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_RESOURCES => { + writer.write_all(stringify!(VDP_STATUS_RESOURCES).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_HANDLE_DEVICE_MISMATCH => { + writer + 
.write_all(stringify!(VDP_STATUS_HANDLE_DEVICE_MISMATCH).as_bytes()) + } + &cuda_types::VdpStatus::VDP_STATUS_ERROR => { + writer.write_all(stringify!(VDP_STATUS_ERROR).as_bytes()) + } + _ => write!(writer, "{}", self.0), + } + } +} +impl crate::format::CudaDisplay for cuda_types::VdpGetProcAddress { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + write!( + writer, + "{:p}", + unsafe { + std::mem::transmute::< + cuda_types::VdpGetProcAddress, + *mut ::std::ffi::c_void, + >(*self) + }, + ) + } +} +pub fn write_cuVDPAUGetDevice( + writer: &mut (impl std::io::Write + ?Sized), + pDevice: *mut cuda_types::CUdevice, + vdpDevice: cuda_types::VdpDevice, + vdpGetProcAddress: cuda_types::VdpGetProcAddress, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pDevice, "cuVDPAUGetDevice", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&vdpDevice, "cuVDPAUGetDevice", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpGetProcAddress), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &vdpGetProcAddress, + "cuVDPAUGetDevice", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuVDPAUCtxCreate_v2( + writer: &mut (impl std::io::Write + ?Sized), + pCtx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, + vdpDevice: cuda_types::VdpDevice, + vdpGetProcAddress: cuda_types::VdpGetProcAddress, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCtx, "cuVDPAUCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuVDPAUCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuVDPAUCtxCreate_v2", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &vdpDevice, + "cuVDPAUCtxCreate_v2", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpGetProcAddress), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &vdpGetProcAddress, + "cuVDPAUCtxCreate_v2", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsVDPAURegisterVideoSurface( + writer: &mut (impl std::io::Write + ?Sized), + pCudaResource: *mut cuda_types::CUgraphicsResource, + vdpSurface: cuda_types::VdpVideoSurface, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCudaResource, + "cuGraphicsVDPAURegisterVideoSurface", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpSurface), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &vdpSurface, + 
"cuGraphicsVDPAURegisterVideoSurface", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphicsVDPAURegisterVideoSurface", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuGraphicsVDPAURegisterOutputSurface( + writer: &mut (impl std::io::Write + ?Sized), + pCudaResource: *mut cuda_types::CUgraphicsResource, + vdpSurface: cuda_types::VdpOutputSurface, + flags: ::core::ffi::c_uint, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCudaResource), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &pCudaResource, + "cuGraphicsVDPAURegisterOutputSurface", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpSurface), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &vdpSurface, + "cuGraphicsVDPAURegisterOutputSurface", + arg_idx, + writer, + )?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &flags, + "cuGraphicsVDPAURegisterOutputSurface", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +pub fn write_cuVDPAUCtxCreate( + writer: &mut (impl std::io::Write + ?Sized), + pCtx: *mut cuda_types::CUcontext, + flags: ::core::ffi::c_uint, + device: cuda_types::CUdevice, + vdpDevice: cuda_types::VdpDevice, + vdpGetProcAddress: cuda_types::VdpGetProcAddress, +) -> std::io::Result<()> { + let mut arg_idx = 0usize; + writer.write_all(b"(")?; + writer.write_all(concat!(stringify!(pCtx), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&pCtx, "cuVDPAUCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(flags), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&flags, "cuVDPAUCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(device), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&device, "cuVDPAUCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpDevice), ": ").as_bytes())?; + crate::format::CudaDisplay::write(&vdpDevice, "cuVDPAUCtxCreate", arg_idx, writer)?; + arg_idx += 1; + writer.write_all(b", ")?; + writer.write_all(concat!(stringify!(vdpGetProcAddress), ": ").as_bytes())?; + crate::format::CudaDisplay::write( + &vdpGetProcAddress, + "cuVDPAUCtxCreate", + arg_idx, + writer, + )?; + writer.write_all(b")") +} +impl crate::format::CudaDisplay for cuda_types::CUresult { + fn write( + &self, + _fn_name: &'static str, + _index: usize, + writer: &mut (impl std::io::Write + ?Sized), + ) -> std::io::Result<()> { + match self { + Ok(()) => writer.write_all(b"CUDA_SUCCESS"), + Err(err) => { + match err.0.get() { + 1 => writer.write_all("CUDA_ERROR_INVALID_VALUE".as_bytes()), + 2 => writer.write_all("CUDA_ERROR_OUT_OF_MEMORY".as_bytes()), + 3 => writer.write_all("CUDA_ERROR_NOT_INITIALIZED".as_bytes()), + 4 => writer.write_all("CUDA_ERROR_DEINITIALIZED".as_bytes()), + 5 => writer.write_all("CUDA_ERROR_PROFILER_DISABLED".as_bytes()), + 6 => { + writer + .write_all("CUDA_ERROR_PROFILER_NOT_INITIALIZED".as_bytes()) + } + 7 => { + writer + .write_all("CUDA_ERROR_PROFILER_ALREADY_STARTED".as_bytes()) + } + 8 => { + writer + .write_all("CUDA_ERROR_PROFILER_ALREADY_STOPPED".as_bytes()) + } + 34 => 
writer.write_all("CUDA_ERROR_STUB_LIBRARY".as_bytes()), + 46 => writer.write_all("CUDA_ERROR_DEVICE_UNAVAILABLE".as_bytes()), + 100 => writer.write_all("CUDA_ERROR_NO_DEVICE".as_bytes()), + 101 => writer.write_all("CUDA_ERROR_INVALID_DEVICE".as_bytes()), + 102 => writer.write_all("CUDA_ERROR_DEVICE_NOT_LICENSED".as_bytes()), + 200 => writer.write_all("CUDA_ERROR_INVALID_IMAGE".as_bytes()), + 201 => writer.write_all("CUDA_ERROR_INVALID_CONTEXT".as_bytes()), + 202 => { + writer.write_all("CUDA_ERROR_CONTEXT_ALREADY_CURRENT".as_bytes()) + } + 205 => writer.write_all("CUDA_ERROR_MAP_FAILED".as_bytes()), + 206 => writer.write_all("CUDA_ERROR_UNMAP_FAILED".as_bytes()), + 207 => writer.write_all("CUDA_ERROR_ARRAY_IS_MAPPED".as_bytes()), + 208 => writer.write_all("CUDA_ERROR_ALREADY_MAPPED".as_bytes()), + 209 => writer.write_all("CUDA_ERROR_NO_BINARY_FOR_GPU".as_bytes()), + 210 => writer.write_all("CUDA_ERROR_ALREADY_ACQUIRED".as_bytes()), + 211 => writer.write_all("CUDA_ERROR_NOT_MAPPED".as_bytes()), + 212 => writer.write_all("CUDA_ERROR_NOT_MAPPED_AS_ARRAY".as_bytes()), + 213 => { + writer.write_all("CUDA_ERROR_NOT_MAPPED_AS_POINTER".as_bytes()) + } + 214 => writer.write_all("CUDA_ERROR_ECC_UNCORRECTABLE".as_bytes()), + 215 => writer.write_all("CUDA_ERROR_UNSUPPORTED_LIMIT".as_bytes()), + 216 => { + writer.write_all("CUDA_ERROR_CONTEXT_ALREADY_IN_USE".as_bytes()) + } + 217 => { + writer.write_all("CUDA_ERROR_PEER_ACCESS_UNSUPPORTED".as_bytes()) + } + 218 => writer.write_all("CUDA_ERROR_INVALID_PTX".as_bytes()), + 219 => { + writer + .write_all("CUDA_ERROR_INVALID_GRAPHICS_CONTEXT".as_bytes()) + } + 220 => writer.write_all("CUDA_ERROR_NVLINK_UNCORRECTABLE".as_bytes()), + 221 => { + writer.write_all("CUDA_ERROR_JIT_COMPILER_NOT_FOUND".as_bytes()) + } + 222 => { + writer.write_all("CUDA_ERROR_UNSUPPORTED_PTX_VERSION".as_bytes()) + } + 223 => { + writer + .write_all("CUDA_ERROR_JIT_COMPILATION_DISABLED".as_bytes()) + } + 224 => { + writer + .write_all("CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY".as_bytes()) + } + 225 => { + writer + .write_all("CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC".as_bytes()) + } + 300 => writer.write_all("CUDA_ERROR_INVALID_SOURCE".as_bytes()), + 301 => writer.write_all("CUDA_ERROR_FILE_NOT_FOUND".as_bytes()), + 302 => { + writer + .write_all( + "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND".as_bytes(), + ) + } + 303 => { + writer + .write_all("CUDA_ERROR_SHARED_OBJECT_INIT_FAILED".as_bytes()) + } + 304 => writer.write_all("CUDA_ERROR_OPERATING_SYSTEM".as_bytes()), + 400 => writer.write_all("CUDA_ERROR_INVALID_HANDLE".as_bytes()), + 401 => writer.write_all("CUDA_ERROR_ILLEGAL_STATE".as_bytes()), + 402 => writer.write_all("CUDA_ERROR_LOSSY_QUERY".as_bytes()), + 500 => writer.write_all("CUDA_ERROR_NOT_FOUND".as_bytes()), + 600 => writer.write_all("CUDA_ERROR_NOT_READY".as_bytes()), + 700 => writer.write_all("CUDA_ERROR_ILLEGAL_ADDRESS".as_bytes()), + 701 => { + writer.write_all("CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES".as_bytes()) + } + 702 => writer.write_all("CUDA_ERROR_LAUNCH_TIMEOUT".as_bytes()), + 703 => { + writer + .write_all( + "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING".as_bytes(), + ) + } + 704 => { + writer + .write_all( + "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED".as_bytes(), + ) + } + 705 => { + writer.write_all("CUDA_ERROR_PEER_ACCESS_NOT_ENABLED".as_bytes()) + } + 708 => { + writer.write_all("CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE".as_bytes()) + } + 709 => writer.write_all("CUDA_ERROR_CONTEXT_IS_DESTROYED".as_bytes()), + 710 => writer.write_all("CUDA_ERROR_ASSERT".as_bytes()), + 711 => 
writer.write_all("CUDA_ERROR_TOO_MANY_PEERS".as_bytes()), + 712 => { + writer + .write_all( + "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED".as_bytes(), + ) + } + 713 => { + writer + .write_all( + "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED".as_bytes(), + ) + } + 714 => writer.write_all("CUDA_ERROR_HARDWARE_STACK_ERROR".as_bytes()), + 715 => writer.write_all("CUDA_ERROR_ILLEGAL_INSTRUCTION".as_bytes()), + 716 => writer.write_all("CUDA_ERROR_MISALIGNED_ADDRESS".as_bytes()), + 717 => { + writer.write_all("CUDA_ERROR_INVALID_ADDRESS_SPACE".as_bytes()) + } + 718 => writer.write_all("CUDA_ERROR_INVALID_PC".as_bytes()), + 719 => writer.write_all("CUDA_ERROR_LAUNCH_FAILED".as_bytes()), + 720 => { + writer + .write_all( + "CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE".as_bytes(), + ) + } + 800 => writer.write_all("CUDA_ERROR_NOT_PERMITTED".as_bytes()), + 801 => writer.write_all("CUDA_ERROR_NOT_SUPPORTED".as_bytes()), + 802 => writer.write_all("CUDA_ERROR_SYSTEM_NOT_READY".as_bytes()), + 803 => { + writer.write_all("CUDA_ERROR_SYSTEM_DRIVER_MISMATCH".as_bytes()) + } + 804 => { + writer + .write_all( + "CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE".as_bytes(), + ) + } + 805 => { + writer.write_all("CUDA_ERROR_MPS_CONNECTION_FAILED".as_bytes()) + } + 806 => writer.write_all("CUDA_ERROR_MPS_RPC_FAILURE".as_bytes()), + 807 => writer.write_all("CUDA_ERROR_MPS_SERVER_NOT_READY".as_bytes()), + 808 => { + writer.write_all("CUDA_ERROR_MPS_MAX_CLIENTS_REACHED".as_bytes()) + } + 809 => { + writer + .write_all( + "CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED".as_bytes(), + ) + } + 810 => { + writer.write_all("CUDA_ERROR_MPS_CLIENT_TERMINATED".as_bytes()) + } + 811 => writer.write_all("CUDA_ERROR_CDP_NOT_SUPPORTED".as_bytes()), + 812 => writer.write_all("CUDA_ERROR_CDP_VERSION_MISMATCH".as_bytes()), + 900 => { + writer + .write_all( + "CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED".as_bytes(), + ) + } + 901 => { + writer + .write_all( + "CUDA_ERROR_STREAM_CAPTURE_INVALIDATED".as_bytes(), + ) + } + 902 => writer.write_all("CUDA_ERROR_STREAM_CAPTURE_MERGE".as_bytes()), + 903 => { + writer + .write_all("CUDA_ERROR_STREAM_CAPTURE_UNMATCHED".as_bytes()) + } + 904 => { + writer.write_all("CUDA_ERROR_STREAM_CAPTURE_UNJOINED".as_bytes()) + } + 905 => { + writer + .write_all("CUDA_ERROR_STREAM_CAPTURE_ISOLATION".as_bytes()) + } + 906 => { + writer.write_all("CUDA_ERROR_STREAM_CAPTURE_IMPLICIT".as_bytes()) + } + 907 => writer.write_all("CUDA_ERROR_CAPTURED_EVENT".as_bytes()), + 908 => { + writer + .write_all( + "CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD".as_bytes(), + ) + } + 909 => writer.write_all("CUDA_ERROR_TIMEOUT".as_bytes()), + 910 => { + writer + .write_all("CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE".as_bytes()) + } + 911 => writer.write_all("CUDA_ERROR_EXTERNAL_DEVICE".as_bytes()), + 912 => writer.write_all("CUDA_ERROR_INVALID_CLUSTER_SIZE".as_bytes()), + 913 => writer.write_all("CUDA_ERROR_FUNCTION_NOT_LOADED".as_bytes()), + 914 => { + writer.write_all("CUDA_ERROR_INVALID_RESOURCE_TYPE".as_bytes()) + } + 915 => { + writer + .write_all( + "CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION".as_bytes(), + ) + } + 999 => writer.write_all("CUDA_ERROR_UNKNOWN".as_bytes()), + err => write!(writer, "{}", err), + } + } + } + } +} diff --git a/zluda_dump/src/lib.rs b/zluda_dump/src/lib.rs index 04fc36ea..c2dff426 100644 --- a/zluda_dump/src/lib.rs +++ b/zluda_dump/src/lib.rs @@ -1,22 +1,18 @@ -use cuda_types::{ - CUdevice, CUdevice_attribute, CUfunction, CUjit_option, CUmodule, CUresult, CUuuid, -}; +use cuda_types::*; use paste::paste; use 
side_by_side::CudaDynamicFns; use std::io; -use std::{ - collections::HashMap, env, error::Error, ffi::c_void, fs, path::PathBuf, ptr::NonNull, rc::Rc, - sync::Mutex, -}; +use std::{collections::HashMap, env, error::Error, fs, path::PathBuf, rc::Rc, sync::Mutex}; #[macro_use] extern crate lazy_static; extern crate cuda_types; macro_rules! extern_redirect { - ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path);*) => { + ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => { $( #[no_mangle] + #[allow(improper_ctypes_definitions)] pub extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type { let original_fn = |dynamic_fns: &mut crate::side_by_side::CudaDynamicFns| { dynamic_fns.$fn_name($( $arg_id ),*) @@ -34,10 +30,11 @@ macro_rules! extern_redirect { } macro_rules! extern_redirect_with_post { - ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path);*) => { + ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => { $( #[no_mangle] - pub extern "system" fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type { + #[allow(improper_ctypes_definitions)] + pub extern $abi fn $fn_name ( $( $arg_id : $arg_type),* ) -> $ret_type { let original_fn = |dynamic_fns: &mut crate::side_by_side::CudaDynamicFns| { dynamic_fns.$fn_name($( $arg_id ),*) }; @@ -60,10 +57,8 @@ macro_rules! extern_redirect_with_post { use cuda_base::cuda_function_declarations; cuda_function_declarations!( - cuda_types, extern_redirect, - extern_redirect_with_post, - [ + extern_redirect_with_post <= [ cuModuleLoad, cuModuleLoadData, cuModuleLoadDataEx, @@ -205,10 +200,10 @@ impl Settings { } }; let libcuda_path = match env::var("ZLUDA_CUDA_LIB") { - Err(env::VarError::NotPresent) => os::LIBCUDA_DEFAULT_PATH.to_owned(), + Err(env::VarError::NotPresent) => os::LIBCUDA_DEFAULT_PATH.to_string(), Err(e) => { logger.log(log::LogEntry::ErrorBox(Box::new(e) as _)); - os::LIBCUDA_DEFAULT_PATH.to_owned() + os::LIBCUDA_DEFAULT_PATH.to_string() } Ok(env_string) => env_string, }; @@ -302,7 +297,7 @@ where // alternatively we could return a CUDA error, but I think it's fine to // crash. 
This is a diagnostic utility, if the lock was poisoned we can't // extract any useful trace or logging anyway - let mut global_state = &mut *global_state_mutex.lock().unwrap(); + let global_state = &mut *global_state_mutex.lock().unwrap(); let (mut logger, delayed_state) = match global_state.delayed_state { LateInit::Success(ref mut delayed_state) => ( global_state.log_factory.get_logger(func, arguments_writer), @@ -325,7 +320,7 @@ where logger.log(log::LogEntry::ErrorBox( format!("No function {} in the underlying CUDA library", func).into(), )); - CUresult::CUDA_ERROR_UNKNOWN + CUresult::ERROR_UNKNOWN } }; logger.result = maybe_cu_result; @@ -359,7 +354,7 @@ pub(crate) fn cuModuleLoad_Post( state: &mut trace::StateTracker, result: CUresult, ) { - if result != CUresult::CUDA_SUCCESS { + if result.is_err() { return; } state.record_new_module_file(unsafe { *module }, fname, fn_logger) @@ -373,7 +368,7 @@ pub(crate) fn cuModuleLoadData_Post( state: &mut trace::StateTracker, result: CUresult, ) { - if result != CUresult::CUDA_SUCCESS { + if result.is_err() { return; } state.record_new_module(unsafe { *module }, raw_image, fn_logger) @@ -401,7 +396,7 @@ pub(crate) fn cuGetExportTable_Post( state: &mut trace::StateTracker, result: CUresult, ) { - if result != CUresult::CUDA_SUCCESS { + if result.is_err() { return; } dark_api::override_export_table(ppExportTable, pExportTableId, state) @@ -451,7 +446,7 @@ pub(crate) fn cuModuleLoadFatBinary_Post( _state: &mut trace::StateTracker, result: CUresult, ) { - if result == CUresult::CUDA_SUCCESS { + if result.is_ok() { panic!() } } diff --git a/zluda_dump/src/os_unix.rs b/zluda_dump/src/os_unix.rs index e1e516bb..a4f29b28 100644 --- a/zluda_dump/src/os_unix.rs +++ b/zluda_dump/src/os_unix.rs @@ -1,8 +1,8 @@ -use crate::cuda::CUuuid; +use cuda_types::CUuuid; use std::ffi::{c_void, CStr, CString}; use std::mem; -pub(crate) const LIBCUDA_DEFAULT_PATH: &'static str = b"/usr/lib/x86_64-linux-gnu/libcuda.so.1\0"; +pub(crate) const LIBCUDA_DEFAULT_PATH: &str = "/usr/lib/x86_64-linux-gnu/libcuda.so.1"; pub unsafe fn load_library(libcuda_path: &str) -> *mut c_void { let libcuda_path = CString::new(libcuda_path).unwrap(); diff --git a/zluda_dump/src/side_by_side.rs b/zluda_dump/src/side_by_side.rs index 33954b88..11789cad 100644 --- a/zluda_dump/src/side_by_side.rs +++ b/zluda_dump/src/side_by_side.rs @@ -56,8 +56,10 @@ impl CudaDynamicFns { } macro_rules! emit_cuda_fn_table { - ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path);*) => { + ($($abi:literal fn $fn_name:ident( $($arg_id:ident : $arg_type:ty),* ) -> $ret_type:path;)*) => { #[derive(Default)] + #[allow(improper_ctypes)] + #[allow(improper_ctypes_definitions)] struct CudaFnTable { $($fn_name: DynamicFn $ret_type>),* } @@ -74,4 +76,4 @@ macro_rules! 
emit_cuda_fn_table { }; } -cuda_function_declarations!(cuda_types, emit_cuda_fn_table, emit_cuda_fn_table, []); +cuda_function_declarations!(emit_cuda_fn_table); diff --git a/zluda_dump/src/trace.rs b/zluda_dump/src/trace.rs index 59a0a0f6..9002acaa 100644 --- a/zluda_dump/src/trace.rs +++ b/zluda_dump/src/trace.rs @@ -1,6 +1,3 @@ -use ptx::{ast::PtxError, Token}; -use ptx::{DisplayParseError, ModuleParserExt}; - use crate::{dark_api, log, Settings}; use cuda_types::CUmodule; use std::{ @@ -172,7 +169,7 @@ impl StateTracker { submodule_index: Option, module_text: &str, ) { - let (_ast, errors) = ptx::ModuleParser::parse_unchecked(module_text); + let errors = ptx_parser::parse_for_errors(module_text); if !errors.is_empty() { fn_logger.log(log::LogEntry::ModuleParsingError( DumpWriter::get_file_name(module_index, version, submodule_index, "log"), @@ -232,7 +229,7 @@ impl DumpWriter { module_index: usize, version: Option, submodule_index: Option, - errors: &[ptx::ParseError, PtxError>], + errors: &[ptx_parser::PtxError<'input>], ) -> io::Result<()> { let mut log_file = match &self.dump_dir { None => return Ok(()), @@ -246,8 +243,7 @@ impl DumpWriter { )); let mut file = File::create(log_file)?; for error in errors { - let pretty_print_error = unsafe { DisplayParseError::new(error, module_text) }; - writeln!(file, "{}", pretty_print_error)?; + writeln!(file, "{}", error)?; } Ok(()) } diff --git a/zluda_lib/Cargo.toml b/zluda_lib/Cargo.toml deleted file mode 100644 index 54a2d53c..00000000 --- a/zluda_lib/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "zluda_lib" -version = "0.0.0" -authors = ["Andrzej Janik "] -edition = "2018" - -[lib] -name = "nvcuda" -crate-type = ["cdylib"] - -[dependencies] -zluda = { path = "../zluda" } diff --git a/zluda_lib/README.md b/zluda_lib/README.md deleted file mode 100644 index a5441475..00000000 --- a/zluda_lib/README.md +++ /dev/null @@ -1 +0,0 @@ -This project exist solely as a workaround, to make sure that ZLUDA-created CUDA driver does not clash with real CUDA driver when running unit tests \ No newline at end of file diff --git a/zluda_lib/src/lib.rs b/zluda_lib/src/lib.rs deleted file mode 100644 index a489d98f..00000000 --- a/zluda_lib/src/lib.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub extern crate zluda; - -pub use zluda::cuda::*; - -// For some reason, on Linux linker strips out all our re-exports, -// there's probably a cleaner solution, but for now just exporting -// the function below stops it from doing so -#[no_mangle] -fn _zluda_very_bad_linker_hack() { - cuInit(0); -} diff --git a/zluda_ml/Cargo.toml b/zluda_ml/Cargo.toml index 4c540b41..d2e2378f 100644 --- a/zluda_ml/Cargo.toml +++ b/zluda_ml/Cargo.toml @@ -2,12 +2,8 @@ name = "zluda_ml" version = "0.0.0" authors = ["Andrzej Janik "] -edition = "2018" +edition = "2021" [lib] name = "nvml" crate-type = ["cdylib"] - -[dependencies.ocl-core] -version = "0.11" -features = ["opencl_version_1_2", "opencl_version_2_0", "opencl_version_2_1"] \ No newline at end of file diff --git a/zluda_ml/src/impl.rs b/zluda_ml/src/impl.rs index 2f820080..35f37780 100644 --- a/zluda_ml/src/impl.rs +++ b/zluda_ml/src/impl.rs @@ -1,23 +1,5 @@ -use std::{ - os::raw::{c_char, c_uint}, - ptr, -}; - use crate::nvml::nvmlReturn_t; -const VERSION: &'static [u8] = b"418.40.04"; - -macro_rules! 
stringify_nmvlreturn_t { - ($x:ident => [ $($variant:ident),+ ]) => { - match $x { - $( - nvmlReturn_t::$variant => Some(concat!(stringify!($variant), "\0")), - )+ - _ => None - } - } -} - #[cfg(debug_assertions)] pub(crate) fn unimplemented() -> nvmlReturn_t { unimplemented!() @@ -27,119 +9,3 @@ pub(crate) fn unimplemented() -> nvmlReturn_t { pub(crate) fn unimplemented() -> nvmlReturn_t { nvmlReturn_t::NVML_ERROR_NOT_SUPPORTED } - -pub(crate) fn error_string(result: nvmlReturn_t) -> *const ::std::os::raw::c_char { - let text = stringify_nmvlreturn_t!( - result => [ - NVML_SUCCESS, - NVML_ERROR_UNINITIALIZED, - NVML_ERROR_INVALID_ARGUMENT, - NVML_ERROR_NOT_SUPPORTED, - NVML_ERROR_NO_PERMISSION, - NVML_ERROR_ALREADY_INITIALIZED, - NVML_ERROR_NOT_FOUND, - NVML_ERROR_INSUFFICIENT_SIZE, - NVML_ERROR_INSUFFICIENT_POWER, - NVML_ERROR_DRIVER_NOT_LOADED, - NVML_ERROR_TIMEOUT, - NVML_ERROR_IRQ_ISSUE, - NVML_ERROR_LIBRARY_NOT_FOUND, - NVML_ERROR_FUNCTION_NOT_FOUND, - NVML_ERROR_CORRUPTED_INFOROM, - NVML_ERROR_GPU_IS_LOST, - NVML_ERROR_RESET_REQUIRED, - NVML_ERROR_OPERATING_SYSTEM, - NVML_ERROR_LIB_RM_VERSION_MISMATCH, - NVML_ERROR_IN_USE, - NVML_ERROR_MEMORY, - NVML_ERROR_NO_DATA, - NVML_ERROR_VGPU_ECC_NOT_SUPPORTED, - NVML_ERROR_INSUFFICIENT_RESOURCES, - NVML_ERROR_UNKNOWN - ] - ); - match text { - Some(text) => text.as_ptr() as *const _, - None => ptr::null(), - } -} - -pub(crate) fn shutdown() -> nvmlReturn_t { - nvmlReturn_t::NVML_SUCCESS -} - -static mut DEVICE: Option = None; - -pub(crate) fn init() -> Result<(), nvmlReturn_t> { - let platforms = ocl_core::get_platform_ids()?; - let device = platforms.iter().find_map(|plat| { - let devices = ocl_core::get_device_ids(plat, Some(ocl_core::DeviceType::GPU), None).ok()?; - for dev in devices { - let vendor = ocl_core::get_device_info(dev, ocl_core::DeviceInfo::VendorId).ok()?; - match vendor { - ocl_core::DeviceInfoResult::VendorId(0x8086) - | ocl_core::DeviceInfoResult::VendorId(0x1002) => {} - _ => continue, - }; - let dev_type = ocl_core::get_device_info(dev, ocl_core::DeviceInfo::Type).ok()?; - if let ocl_core::DeviceInfoResult::Type(ocl_core::DeviceType::GPU) = dev_type { - return Some(dev); - } - } - None - }); - unsafe { DEVICE = device }; - if device.is_some() { - Ok(()) - } else { - Err(nvmlReturn_t::NVML_ERROR_UNKNOWN) - } -} - -pub(crate) fn init_with_flags() -> Result<(), nvmlReturn_t> { - init() -} - -impl From for nvmlReturn_t { - fn from(_: ocl_core::Error) -> Self { - nvmlReturn_t::NVML_ERROR_UNKNOWN - } -} - -impl From> for nvmlReturn_t { - fn from(result: Result<(), nvmlReturn_t>) -> Self { - match result { - Ok(()) => nvmlReturn_t::NVML_SUCCESS, - Err(e) => e, - } - } -} - -struct CountingWriter { - pub base: T, - pub len: usize, -} - -impl std::io::Write for CountingWriter { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.len += buf.len(); - self.base.write(buf) - } - - fn flush(&mut self) -> std::io::Result<()> { - self.base.flush() - } -} - -pub(crate) unsafe fn system_get_driver_version( - version_ptr: *mut c_char, - length: c_uint, -) -> Result<(), nvmlReturn_t> { - if version_ptr == ptr::null_mut() || length == 0 { - return Err(nvmlReturn_t::NVML_ERROR_INVALID_ARGUMENT); - } - let strlen = usize::min(VERSION.len(), (length as usize) - 1); - std::ptr::copy_nonoverlapping(VERSION.as_ptr(), version_ptr as _, strlen); - *version_ptr.add(strlen) = 0; - Ok(()) -} diff --git a/zluda_ml/src/nvml.rs b/zluda_ml/src/nvml.rs index cab546a2..0358bc61 100644 --- a/zluda_ml/src/nvml.rs +++ b/zluda_ml/src/nvml.rs @@ 
-1131,27 +1131,27 @@ pub use self::nvmlPcieLinkState_enum as nvmlPcieLinkState_t;
 
 #[no_mangle]
 pub extern "C" fn nvmlInit_v2() -> nvmlReturn_t {
-    crate::r#impl::init().into()
+    crate::r#impl::unimplemented()
 }
 
 #[no_mangle]
 pub extern "C" fn nvmlInit() -> nvmlReturn_t {
-    crate::r#impl::init().into()
+    crate::r#impl::unimplemented()
 }
 
 #[no_mangle]
 pub extern "C" fn nvmlInitWithFlags(flags: ::std::os::raw::c_uint) -> nvmlReturn_t {
-    crate::r#impl::init_with_flags().into()
+    crate::r#impl::unimplemented()
 }
 
 #[no_mangle]
 pub extern "C" fn nvmlShutdown() -> nvmlReturn_t {
-    crate::r#impl::shutdown()
+    crate::r#impl::unimplemented()
 }
 
 #[no_mangle]
 pub extern "C" fn nvmlErrorString(result: nvmlReturn_t) -> *const ::std::os::raw::c_char {
-    crate::r#impl::error_string(result)
+    c"".as_ptr()
 }
 
 #[no_mangle]
@@ -1159,7 +1159,7 @@ pub unsafe extern "C" fn nvmlSystemGetDriverVersion(
     version: *mut ::std::os::raw::c_char,
     length: ::std::os::raw::c_uint,
 ) -> nvmlReturn_t {
-    crate::r#impl::system_get_driver_version(version, length).into()
+    crate::r#impl::unimplemented()
 }
 
 #[no_mangle]
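The zluda_dump hunks above switch from comparing against `CUresult::CUDA_SUCCESS` to calling `result.is_err()` / `result.is_ok()`, and the new `CudaDisplay` impl matches `Ok(())` / `Err(err)` and reads the raw driver code via `err.0.get()`. The sketch below is not part of the patch and does not use the real `cuda_types` definitions; it models that Result-shaped `CUresult` with a hypothetical `NonZeroU32` wrapper (called `CUerror` here, with an illustrative `write_result` helper) purely to show how the formatter resolves success and error codes to their `CUDA_*` names. The error table is truncated to two entries.

use std::io::{self, Write};
use std::num::NonZeroU32;

// Stand-in for the error payload: a non-zero CUDA driver error code.
#[derive(Clone, Copy)]
struct CUerror(NonZeroU32);

// Stand-in for the Result-shaped CUresult the generated formatter matches on.
type CUresult = Result<(), CUerror>;

// Mirrors the shape of the CudaDisplay impl in the hunk above: success prints
// CUDA_SUCCESS, known error codes print their name, unknown codes print the number.
fn write_result<W: Write + ?Sized>(result: CUresult, writer: &mut W) -> io::Result<()> {
    match result {
        Ok(()) => writer.write_all(b"CUDA_SUCCESS"),
        Err(err) => match err.0.get() {
            1 => writer.write_all(b"CUDA_ERROR_INVALID_VALUE"),
            2 => writer.write_all(b"CUDA_ERROR_OUT_OF_MEMORY"),
            // ...remaining codes elided in this sketch...
            code => write!(writer, "{}", code),
        },
    }
}

fn main() -> io::Result<()> {
    let mut out: Vec<u8> = Vec::new();
    write_result(Ok(()), &mut out)?;
    out.push(b' ');
    write_result(Err(CUerror(NonZeroU32::new(2).unwrap())), &mut out)?;
    assert_eq!(&out[..], b"CUDA_SUCCESS CUDA_ERROR_OUT_OF_MEMORY");
    println!("{}", String::from_utf8(out).unwrap());
    Ok(())
}

Keeping `CUresult` as a genuine `Result` is what lets the call sites in zluda_dump use `result.is_err()` and constants such as `CUresult::ERROR_UNKNOWN` instead of comparing against `CUDA_SUCCESS`, and it is why the success arm of the formatter is simply `Ok(())`.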