From 320f6f460c1adf5810da05fc4e71f5b62c6e9e48 Mon Sep 17 00:00:00 2001
From: Alexandros Nikolaos Ziogas
Date: Thu, 27 Jul 2023 21:55:14 +0200
Subject: [PATCH 1/5] Removed erroneous numba.prange.

---
 npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py b/npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py
index de53dcd..fb79194 100644
--- a/npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py
+++ b/npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py
@@ -7,7 +7,7 @@ def kernel(A):
     A[0, 0] = np.sqrt(A[0, 0])
     for i in range(1, A.shape[0]):
-        for j in nb.prange(i):
+        for j in range(i):
             A[i, j] -= np.dot(A[i, :j], A[j, :j])
             A[i, j] /= A[j, j]
         A[i, i] -= np.dot(A[i, :i], A[i, :i])
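The removed prange was erroneous because the inner loop carries a dependency: iteration j reads A[i, :j], i.e. the entries of row i written by the preceding iterations of the same loop, and Numba does not check prange loops for such cross-iteration dependencies, so parallelizing over j can silently produce a wrong factorization. Below is a minimal NumPy-only sketch of the patched kernel for reference, checked against np.linalg.cholesky; the Numba decorator is omitted, the diagonal square root that follows the hunk's trailing context is included, and the SPD test matrix is purely illustrative.

import numpy as np

def kernel(A):
    # Same loop nest as the patched benchmark kernel, without the Numba decorator.
    A[0, 0] = np.sqrt(A[0, 0])
    for i in range(1, A.shape[0]):
        for j in range(i):  # must stay sequential: reads A[i, :j] written just above
            A[i, j] -= np.dot(A[i, :j], A[j, :j])
            A[i, j] /= A[j, j]
        A[i, i] -= np.dot(A[i, :i], A[i, :i])
        A[i, i] = np.sqrt(A[i, i])

rng = np.random.default_rng(0)
M = rng.random((6, 6))
A = M @ M.T + 6.0 * np.eye(6)    # illustrative symmetric positive-definite input
L_ref = np.linalg.cholesky(A)
kernel(A)                        # factorizes in place; result lives in the lower triangle
assert np.allclose(np.tril(A), L_ref)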
From b24d04a3ac60f7b5368db1e39b0825b13abab7f6 Mon Sep 17 00:00:00 2001
From: Alexandros Nikolaos Ziogas
Date: Thu, 27 Jul 2023 21:56:09 +0200
Subject: [PATCH 2/5] Renamed numba sample.

---
 .../cholesky/{cholesky_numba_npr.py => cholesky_numba_np.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename npbench/benchmarks/polybench/cholesky/{cholesky_numba_npr.py => cholesky_numba_np.py} (100%)

diff --git a/npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py b/npbench/benchmarks/polybench/cholesky/cholesky_numba_np.py
similarity index 100%
rename from npbench/benchmarks/polybench/cholesky/cholesky_numba_npr.py
rename to npbench/benchmarks/polybench/cholesky/cholesky_numba_np.py

From 99dc8f04b3a433362a47afc382e1c20f1b2638cf Mon Sep 17 00:00:00 2001
From: Alexandros Nikolaos Ziogas
Date: Thu, 27 Jul 2023 21:56:43 +0200
Subject: [PATCH 3/5] Fixed string, framework used.

---
 npbench/infrastructure/test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/npbench/infrastructure/test.py b/npbench/infrastructure/test.py
index da0eafd..bccc3f7 100644
--- a/npbench/infrastructure/test.py
+++ b/npbench/infrastructure/test.py
@@ -43,8 +43,8 @@ def _execute(self, frmwrk: Framework, impl: Callable, impl_name: str, mode: str,
             out = [out]
         else:
             out = []
-        if "out_args" in self.bench.info.keys():
-            out += [ldict[a] for a in self.frmwrk.args(self.bench)]
+        if "output_args" in self.bench.info.keys():
+            out += [ldict[a] for a in frmwrk.args(self.bench)]
         return out, timelist

     def run(self, preset: str, validate: bool, repeat: int, timeout: float = 200.0, ignore_errors: bool = True):

From 54c3f0e4530299d91578560582948545160abb04 Mon Sep 17 00:00:00 2001
From: Alexandros Nikolaos Ziogas
Date: Thu, 27 Jul 2023 21:57:50 +0200
Subject: [PATCH 4/5] Amended utility method to properly populate context dictionary with all arguments.

---
 npbench/infrastructure/utilities.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/npbench/infrastructure/utilities.py b/npbench/infrastructure/utilities.py
index d9f870e..7bdf810 100644
--- a/npbench/infrastructure/utilities.py
+++ b/npbench/infrastructure/utilities.py
@@ -134,16 +134,20 @@ def inner(_it, _timer{init}):
 def benchmark(stmt, setup="pass", out_text="", repeat=1, context={}, output=None, verbose=True):
-    timeit.template = timeit_tmpl.format(init='{init}', setup='{setup}', stmt='{stmt}', output=output)
-
     ldict = {**context}
-    output = timeit.repeat(stmt, setup=setup, repeat=repeat, number=1, globals=ldict)
-    res = output[0][1]
-    raw_time_list = [a for a, _ in output]
+    raw_time_list = timeit.repeat(stmt, setup=setup, repeat=repeat, number=1, globals=ldict)
     raw_time = np.median(raw_time_list)
     ms_time = time_to_ms(raw_time)
     if verbose:
         print("{}: {}ms".format(out_text, ms_time))
+
+    if output is not None:
+        exec(setup, context)
+        exec(stmt, context)
+        res = context[output]
+    else:
+        res = None
+
     return res, raw_time_list

From 988dc1b6f0fcb575ae14fbeba6889f4459e70712 Mon Sep 17 00:00:00 2001
From: Alexandros Nikolaos Ziogas
Date: Tue, 14 Nov 2023 00:04:45 +0100
Subject: [PATCH 5/5] Using apply_gpu_storage and auto-opt with use_gpu_storage.

---
 npbench/infrastructure/dace_framework.py | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/npbench/infrastructure/dace_framework.py b/npbench/infrastructure/dace_framework.py
index 2c007ad..2a5afec 100644
--- a/npbench/infrastructure/dace_framework.py
+++ b/npbench/infrastructure/dace_framework.py
@@ -185,14 +185,14 @@ def parallelize(sdfg):
         try:

             def autoopt(sdfg, device, symbols):  #, nofuse):
-                # Mark arrays as on the GPU
-                if device == dtypes.DeviceType.GPU:
-                    for k, v in sdfg.arrays.items():
-                        if not v.transient and type(v) == dace.data.Array:
-                            v.storage = dace.dtypes.StorageType.GPU_Global
+                # # Mark arrays as on the GPU
+                # if device == dtypes.DeviceType.GPU:
+                #     for k, v in sdfg.arrays.items():
+                #         if not v.transient and type(v) == dace.data.Array:
+                #             v.storage = dace.dtypes.StorageType.GPU_Global

                 # Auto-optimize SDFG
-                opt.auto_optimize(auto_opt_sdfg, device, symbols=symbols)
+                opt.auto_optimize(auto_opt_sdfg, device, symbols=symbols, use_gpu_storage=True)

             auto_opt_sdfg = copy.deepcopy(strict_sdfg)
             auto_opt_sdfg._name = 'auto_opt'
@@ -229,9 +229,10 @@ def vectorize(sdfg, vec_len=None):
             dace.Config.set('library', 'blas', 'default_implementation', value='cuBLAS')

             def copy_to_gpu(sdfg):
-                for k, v in sdfg.arrays.items():
-                    if not v.transient and isinstance(v, dace.data.Array):
-                        v.storage = dace.dtypes.StorageType.GPU_Global
+                opt.apply_gpu_storage(sdfg)
+                # for k, v in sdfg.arrays.items():
+                #     if not v.transient and isinstance(v, dace.data.Array):
+                #         v.storage = dace.dtypes.StorageType.GPU_Global

         if self.info["arch"] == "gpu":
             import cupy as cp
@@ -242,9 +243,9 @@
             fe_time = t
             if sdfg._name != 'auto_opt':
                 device = dtypes.DeviceType.GPU if self.info["arch"] == "gpu" else dtypes.DeviceType.CPU
-                if self.info["arch"] == "cpu":
-                    # GPUTransform will set GPU schedules by itself
-                    opt.set_fast_implementations(sdfg, device)
+                # if self.info["arch"] == "cpu":
+                #     # GPUTransform will set GPU schedules by itself
+                opt.set_fast_implementations(sdfg, device)
             if self.info["arch"] == "gpu":
                 if sdfg._name in ['strict', 'parallel', 'fusion']:
                     _, gpu_time1 = util.benchmark("copy_to_gpu(sdfg)",
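For context on PATCH 5 above: instead of marking every non-transient array as GPU-resident by hand, the framework now calls opt.apply_gpu_storage(sdfg) for the strict/parallel/fusion variants and passes use_gpu_storage=True to opt.auto_optimize for the auto_opt variant. Below is a minimal sketch of the two idioms, assuming opt is the dace.transformation.auto.auto_optimize module (as imported in dace_framework.py) and using a toy scale program that is not part of the benchmark suite; a GPU is only needed to compile and run the resulting SDFGs, not to apply the transformations.

import copy
import dace
from dace.transformation.auto import auto_optimize as opt  # assumed import path, as in dace_framework.py

N = dace.symbol('N')

@dace.program
def scale(A: dace.float64[N], B: dace.float64[N]):
    B[:] = 2.0 * A

sdfg = scale.to_sdfg()

# Old idiom (removed by PATCH 5): mark non-transient arrays as GPU-resident by hand.
manual = copy.deepcopy(sdfg)
for _, desc in manual.arrays.items():
    if not desc.transient and isinstance(desc, dace.data.Array):
        desc.storage = dace.dtypes.StorageType.GPU_Global

# New idiom for the strict/parallel/fusion variants: one library call.
plain = copy.deepcopy(sdfg)
opt.apply_gpu_storage(plain)

# New idiom for the auto_opt variant: auto-optimize with GPU storage enabled.
auto = copy.deepcopy(sdfg)
opt.auto_optimize(auto, dace.dtypes.DeviceType.GPU, use_gpu_storage=True)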