Add some basic test coverage for cuda.core.experimental (#153)
Add basic test coverage for cuda.core.experimental
ksimpson-work authored Oct 29, 2024
1 parent e426810 commit 213baf4
Showing 12 changed files with 648 additions and 1 deletion.
2 changes: 1 addition & 1 deletion cuda_core/cuda/core/experimental/_program.py
@@ -14,9 +14,9 @@ class Program:
     _supported_target_type = ("ptx", "cubin", "ltoir", )
 
     def __init__(self, code, code_type):
+        self._handle = None
         if code_type not in self._supported_code_type:
             raise NotImplementedError
-        self._handle = None
 
         if code_type.lower() == "c++":
             if not isinstance(code, str):
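A minimal, hypothetical sketch (not part of the diff) of why the hunk above moves self._handle = None ahead of the code_type check: if __init__ raises while _handle is still unset, any later cleanup that reads self._handle, such as a close() or __del__ method, would fail with AttributeError instead of surfacing the intended NotImplementedError. The class and method names below are illustrative assumptions only.

# Illustrative sketch, not code from this commit.
class _HandleOwnerSketch:
    def __init__(self, code_type):
        self._handle = None  # assign first so cleanup never sees a missing attribute
        if code_type not in ("c++",):
            raise NotImplementedError

    def close(self):
        # Safe even when __init__ raised early: _handle exists and is None.
        if self._handle is not None:
            self._handle = None  # a real implementation would release the handle here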
16 changes: 16 additions & 0 deletions cuda_core/tests/conftest.py
@@ -0,0 +1,16 @@
# Copyright 2024 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.

from cuda.core.experimental._device import Device
import pytest

@pytest.fixture(scope="module")
def init_cuda():
    device = Device()
    device.set_current()
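A short hedged note (not from the commit): pytest injects this fixture into any test that names init_cuda as a parameter, so such a test starts with the device already set current. An illustrative consumer (the test name below is an assumption; the allocate/size calls mirror test_device.py further down):

def test_uses_current_device(init_cuda):
    # init_cuda has already called Device().set_current() for this module
    buffer = Device().allocate(64)
    assert buffer.size == 64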

Empty file.
25 changes: 25 additions & 0 deletions cuda_core/tests/example_tests/test_basic_examples.py
@@ -0,0 +1,25 @@
# Copyright 2024 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.

# If we have subcategories of examples in the future, this file can be split along those lines

from .utils import run_example
import os
import glob
import pytest

samples_path = os.path.join(
    os.path.dirname(__file__), '..', '..', 'examples')
sample_files = glob.glob(os.path.join(samples_path, '**', '*.py'), recursive=True)


@pytest.mark.parametrize(
    'example', sample_files
)
class TestExamples:
    def test_example(self, example):
        run_example(samples_path, example)
54 changes: 54 additions & 0 deletions cuda_core/tests/example_tests/utils.py
@@ -0,0 +1,54 @@
# Copyright 2024 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.

from cuda import cuda
import gc
import os
import sys
import pytest
try:
    import cupy as cp
except ImportError:
    cp = None  # cupy is optional; samples that need it are skipped at run time

class SampleTestError(Exception):
    pass

def parse_python_script(filepath):
    if not filepath.endswith('.py'):
        raise ValueError(f"{filepath} not supported")
    with open(filepath, "r", encoding='utf-8') as f:
        script = f.read()
    return script


def run_example(samples_path, filename, env=None):
    fullpath = os.path.join(samples_path, filename)
    script = parse_python_script(fullpath)
    try:
        old_argv = sys.argv
        sys.argv = [fullpath]
        old_sys_path = sys.path.copy()
        sys.path.append(samples_path)
        exec(script, env if env else {})
    except ImportError as e:
        # for samples requiring any of the optional dependencies
        for m in ('cupy',):
            if f"No module named '{m}'" in str(e):
                pytest.skip(f'{m} not installed, skipping related tests')
                break
        else:
            raise
    except Exception as e:
        msg = "\n"
        msg += f'Got error ({filename}):\n'
        msg += str(e)
        raise SampleTestError(msg) from e
    finally:
        sys.path = old_sys_path
        sys.argv = old_argv
        # further reduce the memory watermark
        gc.collect()
        if cp is not None:
            cp.get_default_memory_pool().free_all_blocks()
67 changes: 67 additions & 0 deletions cuda_core/tests/test_device.py
@@ -0,0 +1,67 @@
# Copyright 2024 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.

from cuda import cuda, cudart
from cuda.core.experimental._device import Device
from cuda.core.experimental._utils import handle_return, ComputeCapability, CUDAError, \
    precondition
import pytest

def test_device_repr():
    device = Device(0)
    assert str(device).startswith('<Device 0')

def test_device_alloc(init_cuda):
    device = Device()
    buffer = device.allocate(1024)
    device.sync()
    assert buffer.handle != 0
    assert buffer.size == 1024
    assert buffer.device_id == 0

def test_device_set_current():
    device = Device()
    device.set_current()

def test_device_create_stream():
    device = Device()
    stream = device.create_stream()
    assert stream is not None
    assert stream.handle

def test_pci_bus_id():
    device = Device()
    bus_id = handle_return(cudart.cudaDeviceGetPCIBusId(13, device.device_id))
    assert device.pci_bus_id == bus_id[:12].decode()

def test_uuid():
    device = Device()
    driver_ver = handle_return(cuda.cuDriverGetVersion())
    if driver_ver >= 11040:
        uuid = handle_return(cuda.cuDeviceGetUuid_v2(device.device_id))
    else:
        uuid = handle_return(cuda.cuDeviceGetUuid(device.device_id))
    uuid = uuid.bytes.hex()
    expected_uuid = f"{uuid[:8]}-{uuid[8:12]}-{uuid[12:16]}-{uuid[16:20]}-{uuid[20:]}"
    assert device.uuid == expected_uuid

def test_name():
    device = Device()
    name = handle_return(cuda.cuDeviceGetName(128, device.device_id))
    name = name.split(b'\0')[0]
    assert device.name == name.decode()

def test_compute_capability():
    device = Device()
    major = handle_return(cudart.cudaDeviceGetAttribute(
        cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, device.device_id))
    minor = handle_return(cudart.cudaDeviceGetAttribute(
        cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, device.device_id))
    expected_cc = ComputeCapability(major, minor)
    assert device.compute_capability == expected_cc

39 changes: 39 additions & 0 deletions cuda_core/tests/test_event.py
@@ -0,0 +1,39 @@
# Copyright 2024 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.

from cuda import cuda
from cuda.core.experimental._event import EventOptions, Event
from cuda.core.experimental._utils import handle_return
from cuda.core.experimental._device import Device
import pytest

def test_is_timing_disabled():
    options = EventOptions(enable_timing=False)
    event = Event._init(options)
    assert event.is_timing_disabled

def test_is_sync_busy_waited():
    options = EventOptions(busy_waited_sync=True)
    event = Event._init(options)
    assert event.is_sync_busy_waited

def test_sync():
    options = EventOptions()
    event = Event._init(options)
    event.sync()
    assert event.is_done

def test_is_done():
    options = EventOptions()
    event = Event._init(options)
    assert event.is_done

def test_handle():
    options = EventOptions()
    event = Event._init(options)
    assert isinstance(event.handle, int)
66 changes: 66 additions & 0 deletions cuda_core/tests/test_launcher.py
@@ -0,0 +1,66 @@
# Copyright 2024 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.

from cuda import cuda
from cuda.core.experimental._launcher import LaunchConfig
from cuda.core.experimental._stream import Stream
from cuda.core.experimental._device import Device
from cuda.core.experimental._utils import handle_return
import pytest

def test_launch_config_init():
    config = LaunchConfig(grid=(1, 1, 1), block=(1, 1, 1), stream=None, shmem_size=0)
    assert config.grid == (1, 1, 1)
    assert config.block == (1, 1, 1)
    assert config.stream is None
    assert config.shmem_size == 0

    config = LaunchConfig(grid=(2, 2, 2), block=(2, 2, 2), stream=Device().create_stream(), shmem_size=1024)
    assert config.grid == (2, 2, 2)
    assert config.block == (2, 2, 2)
    assert isinstance(config.stream, Stream)
    assert config.shmem_size == 1024

def test_launch_config_cast_to_3_tuple():
    config = LaunchConfig(grid=1, block=1)
    assert config._cast_to_3_tuple(1) == (1, 1, 1)
    assert config._cast_to_3_tuple((1, 2)) == (1, 2, 1)
    assert config._cast_to_3_tuple((1, 2, 3)) == (1, 2, 3)

    # Edge cases
    assert config._cast_to_3_tuple(999) == (999, 1, 1)
    assert config._cast_to_3_tuple((999, 888)) == (999, 888, 1)
    assert config._cast_to_3_tuple((999, 888, 777)) == (999, 888, 777)

def test_launch_config_invalid_values():
    with pytest.raises(ValueError):
        LaunchConfig(grid=0, block=1)

    with pytest.raises(ValueError):
        LaunchConfig(grid=(0, 1), block=1)

    with pytest.raises(ValueError):
        LaunchConfig(grid=(1, 1, 1), block=0)

    with pytest.raises(ValueError):
        LaunchConfig(grid=(1, 1, 1), block=(0, 1))

def test_launch_config_stream():
    stream = Device().create_stream()
    config = LaunchConfig(grid=(1, 1, 1), block=(1, 1, 1), stream=stream, shmem_size=0)
    assert config.stream == stream

    with pytest.raises(ValueError):
        LaunchConfig(grid=(1, 1, 1), block=(1, 1, 1), stream="invalid_stream", shmem_size=0)

def test_launch_config_shmem_size():
    config = LaunchConfig(grid=(1, 1, 1), block=(1, 1, 1), stream=None, shmem_size=2048)
    assert config.shmem_size == 2048

    config = LaunchConfig(grid=(1, 1, 1), block=(1, 1, 1), stream=None)
    assert config.shmem_size == 0