[e2e] use testset.py to specify testset and testing more in e2e (#384)
* use `testset.py` to specify the test set
* test `profiler.py` and `gen_brt_tests.py` in e2e
* generate `testcase.json` in `gen_brt_tests.py`
qingyunqu authored Jul 2, 2024
1 parent ed85193 commit a6fe5ec
Showing 5 changed files with 131 additions and 103 deletions.
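
The third bullet refers to a new manifest, `testcase.json`, that `gen_brt_tests.py` now writes next to the generated goldens. Below is a minimal sketch of its layout, reconstructed from the diff further down; the test name (add), the Byre version (1.0.0), and therefore the CPU_BYRE_1_0_0 directory name are illustrative only.

# Hedged sketch of the manifest gen_brt_tests.py emits (concrete values illustrative).
example_testcase_json = {
    "cpu": {
        "byre1.0.0": {                       # f"byre{args.byre_serial_version}"
            "add.cpu": {                     # "<test base name>.<target>"
                "golden_inputs": [
                    "./CPU_BYRE_1_0_0/add.cpu/inputs.0.npz",
                    "./CPU_BYRE_1_0_0/add.cpu/inputs.1.npz",
                ],
                "golden_outputs": [
                    "./CPU_BYRE_1_0_0/add.cpu/outputs.0.npz",
                    "./CPU_BYRE_1_0_0/add.cpu/outputs.1.npz",
                ],
                "brt_entry_file": "./CPU_BYRE_1_0_0/add.cpu/add.cpu.rt.mlirbc",
            },
        },
    },
}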
12 changes: 12 additions & 0 deletions tests/build_and_test_e2e.sh
@@ -21,6 +21,18 @@ pip3 install $ROOT_PROJ_DIR/runtime/python/dist/*.whl --force-reinstall
pip3 install $ROOT_PROJ_DIR/frontends/torch-frontend/build/torch-frontend/python/dist/*.whl --force-reinstall
pip3 install -r $ROOT_PROJ_DIR/frontends/torch-frontend/torch-requirements.txt
pip3 install flash_attn==2.5.3

# numerical test
python3 tests/numerical_test/main.py --target all
rm -rf ./local_test

# profiler test
python3 tests/numerical_test/profiler.py $ROOT_PROJ_DIR/tests/numerical_test/mlir_tests/cpu_ops/add.mlir --target cpu
python3 tests/numerical_test/profiler.py $ROOT_PROJ_DIR/tests/numerical_test/mlir_tests/ops/add.mlir --target cuda
rm -rf ./local_profiling

# generate compatibility test
python3 tests/numerical_test/gen_brt_tests.py
rm -rf ./local_golden

popd
34 changes: 22 additions & 12 deletions tests/numerical_test/gen_brt_tests.py
@@ -19,10 +19,12 @@
import sys
import shutil
import traceback
import json

import numpy as np
from execute import MLIRDataGenerator
from reporting import TestResult, report_results
from testset import CPU_MLIR_TEST_DIR, CPU_MLIR_TEST_SET, CPU_XFAIL_SET
import byteir

parser = argparse.ArgumentParser()
@@ -47,11 +49,6 @@
help="Byre serialization target version")
args = parser.parse_args()

# Unsupported ops
EXCLUDE_MLIR_CPU_TESTS = [
"custom_call_tf_UpperBound.mlir",
"rng.mlir",
]

def gen_golden_mlir(mhlo_file, target, golden_dir, num=2):
"""
@@ -66,6 +63,8 @@ def save_np_data(fpath: str, data):

file_base_name = os.path.basename(mhlo_file).split(".")[0]
unique_name = file_base_name + "." + target
json_relative_dir_path = "./" + os.path.basename(golden_dir) + "/" + unique_name
json_result = {unique_name : {}}
try:
data_generator = MLIRDataGenerator(mhlo_file, target)
func_name = data_generator.entry_func_name
@@ -77,6 +76,8 @@ def save_np_data(fpath: str, data):
if data_generator.need_special_inputs():
num = 1

input_file_path = []
output_file_path = []
for idx in range(0, num):
np_inputs = data_generator.generate_np_inputs()

@@ -88,11 +89,16 @@ def save_np_data(fpath: str, data):
# dump to local file
save_np_data(WORK_FOLDER + f"/inputs.{str(idx)}.npz", np_inputs)
save_np_data(WORK_FOLDER + f"/outputs.{str(idx)}.npz", golden_outputs)
input_file_path.append(json_relative_dir_path + f"/inputs.{str(idx)}.npz")
output_file_path.append(json_relative_dir_path + f"/outputs.{str(idx)}.npz")

del np_inputs, golden_outputs
json_result[unique_name].update({"golden_inputs": input_file_path})
json_result[unique_name].update({"golden_outputs": output_file_path})

# byteir compile
output_mlir_file_name = f"{WORK_FOLDER}/{unique_name}.rt.mlirbc"
json_result[unique_name].update({"brt_entry_file" : json_relative_dir_path + f"/{unique_name}.rt.mlirbc"})
byteir.compile(
mhlo_file, output_mlir_file_name, entry_func=func_name, target=target
)
@@ -116,7 +122,7 @@ def save_np_data(fpath: str, data):
runtime_error=None,
numerical_error=None,
performance_result=None,
)
), None

res = TestResult(
unique_name=unique_name,
@@ -126,13 +132,11 @@ def save_np_data(fpath: str, data):
performance_result=None,
)

return res

return res, json_result


def gen_mlir_cpu_golden():
directory = os.path.dirname(os.path.realpath(__file__))
directory = directory + "/mlir_tests/cpu_ops"
directory = CPU_MLIR_TEST_DIR
cpu_target = "cpu"
os.makedirs(args.output_dir, exist_ok=True)
golden_dir = f"{args.output_dir}/CPU_BYRE_{args.byre_serial_version.replace('.', '_')}"
@@ -146,16 +150,22 @@ def gen_mlir_cpu_golden():
continue
f = os.path.join(directory, filename)
# checking if it is a file
if os.path.isfile(f) and filename not in EXCLUDE_MLIR_CPU_TESTS:
if os.path.isfile(f) and filename in (CPU_MLIR_TEST_SET - CPU_XFAIL_SET):
mlir_tests.append(f)

results = []
byre_version_str = f"byre{args.byre_serial_version}"
json_results = {"cpu" : {byre_version_str : {}}}
for test in mlir_tests:
fpath = test
res = gen_golden_mlir(fpath,
res, key_value = gen_golden_mlir(fpath,
cpu_target,
golden_dir)
results.append(res)
if key_value is not None:
json_results["cpu"][byre_version_str].update(key_value)
with open(f"{args.output_dir}/testcase.json", 'w') as f:
json.dump(json_results, f, indent=4)
return results


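For downstream compatibility runs, the manifest can be consumed with nothing beyond json and numpy. A minimal consumer sketch follows, assuming it runs from the directory that holds testcase.json; the actual BRT execution step is deliberately left as a comment rather than a made-up API call.

import json
import numpy as np

with open("testcase.json") as f:
    manifest = json.load(f)

for byre_version, cases in manifest["cpu"].items():       # e.g. "byre1.0.0"
    for unique_name, entry in cases.items():               # e.g. "add.cpu"
        entry_file = entry["brt_entry_file"]                # compiled .rt.mlirbc
        for in_npz, out_npz in zip(entry["golden_inputs"], entry["golden_outputs"]):
            inputs = np.load(in_npz)
            golden = np.load(out_npz)
            input_tensors = {name: inputs[name] for name in inputs.files}
            golden_tensors = {name: golden[name] for name in golden.files}
            # run `entry_file` on the byteir runtime with `input_tensors`,
            # then compare the produced outputs against `golden_tensors`.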
91 changes: 2 additions & 89 deletions tests/numerical_test/main.py
@@ -24,95 +24,8 @@
GLOBAL_TORCH_TEST_REGISTRY,
GLOBAL_TORCH_TEST_REGISTRY_NAMES,
)
from torch_e2e_testing.test_suite import register_all_torch_tests

register_all_torch_tests()

CUR_DIR = os.path.dirname(os.path.abspath(__file__))


def _get_test_files_from_dir(directory):
test_files = []
for filename in os.listdir(directory):
if filename.startswith("."):
continue
if os.path.isfile(os.path.join(directory, filename)):
test_files.append(filename)
return test_files


##### CPU TEST SET #######
CPU_MLIR_TEST_DIR = os.path.join(CUR_DIR, "mlir_tests", "cpu_ops")
CPU_MLIR_TEST_SET = set(_get_test_files_from_dir(CPU_MLIR_TEST_DIR))
CPU_TORCH_TEST_SET = set()
CPU_XFAIL_SET = {
"custom_call_tf_UpperBound.mlir",
"rng.mlir",
}

CPU_ALL_SET = (CPU_MLIR_TEST_SET | CPU_TORCH_TEST_SET) - CPU_XFAIL_SET

##### CUDA TEST SET #######
CUDA_MLIR_TEST_DIR = os.path.join(CUR_DIR, "mlir_tests", "ops")
CUDA_MLIR_TEST_SET = set(_get_test_files_from_dir(CUDA_MLIR_TEST_DIR))
CUDA_TORCH_TEST_SET = set(GLOBAL_TORCH_TEST_REGISTRY_NAMES)
CUDA_XFAIL_SET = {
"bmm_rcr.mlir",
"bmm_rrc.mlir",
"bmm_rrr_add_f16.mlir",
"bmm_rrr_f16.mlir",
"bmm_rrr_permute_f16.mlir",
"bmm_rrr_permute_f32.mlir",
"layernorm.mlir",
"softmax.mlir",
"transpose102.mlir",
"transpose1023.mlir",
"transpose120.mlir",
"transpose1203.mlir",
"transpose2013.mlir",
"transpose120.mlir",
}

CUDA_ALL_SET = (CUDA_MLIR_TEST_SET | CUDA_TORCH_TEST_SET) - CUDA_XFAIL_SET

##### CUDA AIT TEST SET #######
CUDA_AIT_MLIR_TEST_SET = {
"bmm_rcr.mlir",
"bmm_rrc.mlir",
"bmm_rrr_add_f16.mlir",
"bmm_rrr_f16.mlir",
"bmm_rrr_permute_f16.mlir",
"bmm_rrr_permute_f32.mlir",
"gemm_crr_f16.mlir",
"gemm_rrr_f16.mlir",
"gemm_rrr_f32.mlir",
"layernorm.mlir",
"softmax.mlir",
"transpose2d.mlir",
"transpose102.mlir",
"transpose1023.mlir",
"transpose120.mlir",
"transpose1203.mlir",
"transpose2013.mlir",
"transpose120.mlir",
}
CUDA_AIT_TORCH_TEST_SET = {
"MatmulF16Module_basic",
"MatmulTransposeModule_basic",
"MatmulF32Module_basic",
"BatchMatmulF32Module_basic",
"BatchMatmulAddF32Module_basic",
}
CUDA_AIT_SM80PLUS_SET = {
"gemm_rrr_f32.mlir",
"bmm_rrr_permute_f16.mlir",
"bmm_rrr_permute_f32.mlir",
"MatmulF32Module_basic",
"BatchMatmulF32Module_basic",
"BatchMatmulAddF32Module_basic",
}

CUDA_AIT_ALL_SET = CUDA_AIT_MLIR_TEST_SET | CUDA_AIT_TORCH_TEST_SET
from testset import CPU_MLIR_TEST_DIR, CUDA_MLIR_TEST_DIR
from testset import CPU_ALL_SET, CUDA_ALL_SET, CUDA_AIT_ALL_SET, CUDA_AIT_SM80PLUS_SET

##### TEST SET CONFIG #######
TEST_SET = {
4 changes: 2 additions & 2 deletions tests/numerical_test/profiler.py
@@ -21,8 +21,8 @@
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("input_mlir_path")
parser.add_argument("--workdir", type=str, default="./profiling", help="workspace directory")
parser.add_argument("--name", type=str, default="model")
parser.add_argument("--workdir", type=str, default="./local_profiling", help="workspace directory")
parser.add_argument("--name", type=str, default=None)
parser.add_argument("--target", type=str, default="cuda", choices=["cpu", "cuda", "cuda_with_ait"])
parser.add_argument("--mode", type=str, default="profile", choices=["numerical", "profile"])
parser.add_argument("-v", "--verbose", default=False, action="store_true")
93 changes: 93 additions & 0 deletions tests/numerical_test/testset.py
@@ -0,0 +1,93 @@
import os
from torch_e2e_testing.registry import (
GLOBAL_TORCH_TEST_REGISTRY,
GLOBAL_TORCH_TEST_REGISTRY_NAMES,
)
from torch_e2e_testing.test_suite import register_all_torch_tests

register_all_torch_tests()

CUR_DIR = os.path.dirname(os.path.abspath(__file__))

def _get_test_files_from_dir(directory):
test_files = []
for filename in os.listdir(directory):
if filename.startswith("."):
continue
if os.path.isfile(os.path.join(directory, filename)):
test_files.append(filename)
return test_files


##### CPU TEST SET #######
CPU_MLIR_TEST_DIR = os.path.join(CUR_DIR, "mlir_tests", "cpu_ops")
CPU_MLIR_TEST_SET = set(_get_test_files_from_dir(CPU_MLIR_TEST_DIR))
CPU_TORCH_TEST_SET = set()
CPU_XFAIL_SET = {
"custom_call_tf_UpperBound.mlir",
"rng.mlir",
}

CPU_ALL_SET = (CPU_MLIR_TEST_SET | CPU_TORCH_TEST_SET) - CPU_XFAIL_SET

##### CUDA TEST SET #######
CUDA_MLIR_TEST_DIR = os.path.join(CUR_DIR, "mlir_tests", "ops")
CUDA_MLIR_TEST_SET = set(_get_test_files_from_dir(CUDA_MLIR_TEST_DIR))
CUDA_TORCH_TEST_SET = set(GLOBAL_TORCH_TEST_REGISTRY_NAMES)
CUDA_XFAIL_SET = {
"bmm_rcr.mlir",
"bmm_rrc.mlir",
"bmm_rrr_add_f16.mlir",
"bmm_rrr_f16.mlir",
"bmm_rrr_permute_f16.mlir",
"bmm_rrr_permute_f32.mlir",
"layernorm.mlir",
"softmax.mlir",
"transpose102.mlir",
"transpose1023.mlir",
"transpose120.mlir",
"transpose1203.mlir",
"transpose2013.mlir",
"transpose120.mlir",
}

CUDA_ALL_SET = (CUDA_MLIR_TEST_SET | CUDA_TORCH_TEST_SET) - CUDA_XFAIL_SET

##### CUDA AIT TEST SET #######
CUDA_AIT_MLIR_TEST_SET = {
"bmm_rcr.mlir",
"bmm_rrc.mlir",
"bmm_rrr_add_f16.mlir",
"bmm_rrr_f16.mlir",
"bmm_rrr_permute_f16.mlir",
"bmm_rrr_permute_f32.mlir",
"gemm_crr_f16.mlir",
"gemm_rrr_f16.mlir",
"gemm_rrr_f32.mlir",
"layernorm.mlir",
"softmax.mlir",
"transpose2d.mlir",
"transpose102.mlir",
"transpose1023.mlir",
"transpose120.mlir",
"transpose1203.mlir",
"transpose2013.mlir",
"transpose120.mlir",
}
CUDA_AIT_TORCH_TEST_SET = {
"MatmulF16Module_basic",
"MatmulTransposeModule_basic",
"MatmulF32Module_basic",
"BatchMatmulF32Module_basic",
"BatchMatmulAddF32Module_basic",
}
CUDA_AIT_SM80PLUS_SET = {
"gemm_rrr_f32.mlir",
"bmm_rrr_permute_f16.mlir",
"bmm_rrr_permute_f32.mlir",
"MatmulF32Module_basic",
"BatchMatmulF32Module_basic",
"BatchMatmulAddF32Module_basic",
}

CUDA_AIT_ALL_SET = CUDA_AIT_MLIR_TEST_SET | CUDA_AIT_TORCH_TEST_SET
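
With the sets centralized here, any driver can derive its work list the same way gen_brt_tests.py now does. A minimal sketch of that pattern follows; collect_cpu_mlir_tests is a hypothetical helper for illustration, not part of this commit.

import os
from testset import CPU_MLIR_TEST_DIR, CPU_MLIR_TEST_SET, CPU_XFAIL_SET

def collect_cpu_mlir_tests():
    """Return absolute paths of the CPU MLIR tests expected to pass."""
    tests = []
    for filename in sorted(CPU_MLIR_TEST_SET - CPU_XFAIL_SET):
        path = os.path.join(CPU_MLIR_TEST_DIR, filename)
        if os.path.isfile(path):
            tests.append(path)
    return tests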
