diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 596672765fc..d49dfd90c1e 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -426,6 +426,30 @@ jobs:
 
           call conda deactivate
 
+      - name: Prepare igpu perf test for Qwen1.5 (32-32)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-32_437.yaml
+
+      - name: Test on igpu for Qwen1.5 (32-32)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.37.0
+
+          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\32-32_437.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\32-32\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (32-32)
         shell: cmd
         run: |
@@ -449,7 +473,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/32-32/1024-128/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128.yaml
 
       - name: Test on igpu (1024-128)
@@ -496,6 +520,30 @@ jobs:
 
           call conda deactivate
 
+      - name: Prepare igpu perf test for Qwen 1.5 (1024-128)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+
+      - name: Test on igpu for Qwen 1.5 (1024-128)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.37.0
+
+          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\1024-128_437.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\1024-128\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (1024-128)
         shell: cmd
         run: |
@@ -518,7 +566,7 @@ jobs:
         shell: bash
         run: |
           sed -i 's/1024-128/2048-256/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256.yaml
 
       - name: Test on igpu (2048-256)
@@ -565,6 +613,30 @@ jobs:
 
           call conda deactivate
 
+      - name: Prepare igpu perf test for Qwen 1.5 (2048-256)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/2048-256_437.yaml
+
+      - name: Test on igpu for Qwen 1.5 (2048-256)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.37.0
+
+          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\2048-256_437.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\2048-256\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (2048-256)
         shell: cmd
         run: |
@@ -588,7 +660,7 @@ jobs:
         run: |
           # hide time info
           sed -i 's/2048-256/32-512/g' python/llm/dev/benchmark/all-in-one/run.py
-          sed -i 's/{today}_test2/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i 's/{today}_test3/{today}_test1/g' python/llm/dev/benchmark/all-in-one/run.py
           sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-512.yaml
 
       - name: Test on igpu (32-512)
@@ -635,6 +707,30 @@ jobs:
 
           call conda deactivate
 
+      - name: Prepare igpu perf test for Qwen 1.5 (32-512)
+        shell: bash
+        run: |
+          sed -i 's/{today}_test2/{today}_test3/g' python/llm/dev/benchmark/all-in-one/run.py
+          sed -i "s/path to your local model hub/$MODEL_HUB_DIR/g" python/llm/test/benchmark/igpu-perf/32-512_437.yaml
+
+      - name: Test on igpu for Qwen 1.5 (32-512)
+        shell: cmd
+        run: |
+          call conda activate igpu-perf
+          pip install transformers==4.37.0
+
+          call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat"
+          set SYCL_CACHE_PERSISTENT=1
+          set BIGDL_LLM_XMX_DISABLED=1
+
+          cd python\llm\dev\benchmark\all-in-one
+          move ..\..\..\test\benchmark\igpu-perf\32-512_437.yaml config.yaml
+          set PYTHONIOENCODING=utf-8
+          python run.py >> %CSV_SAVE_PATH%\32-512\log\%LOG_FILE% 2>&1
+          if %ERRORLEVEL% neq 0 (exit /b 1)
+
+          call conda deactivate
+
       - name: Concat csv and generate html (32-512)
         shell: cmd
         run: |
diff --git a/python/llm/test/benchmark/concat_csv.py b/python/llm/test/benchmark/concat_csv.py
index f2a712f33b6..908f71f1aed 100644
--- a/python/llm/test/benchmark/concat_csv.py
+++ b/python/llm/test/benchmark/concat_csv.py
@@ -36,7 +36,7 @@ def main():
     merged_df = pd.concat([pd.read_csv(file, index_col=0) for file in csv_files], ignore_index=True)
     merged_df.reset_index(drop=True, inplace=True)
 
-    merged_csv = csv_files[0].replace("_test1", "").replace("_test2", "")
+    merged_csv = csv_files[0].replace("_test1", "").replace("_test2", "").replace("_test3", "")
     merged_df.to_csv(merged_csv)
 
 if __name__ == "__main__":
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
new file mode 100644
index 00000000000..2b829845860
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
@@ -0,0 +1,13 @@
+repo_id:
+  - 'Qwen/Qwen1.5-7B-Chat'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '1024-128'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml
new file mode 100644
index 00000000000..b5569a2aaa8
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml
@@ -0,0 +1,13 @@
+repo_id:
+  - 'Qwen/Qwen1.5-7B-Chat'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '2048-256'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_437.yaml
new file mode 100644
index 00000000000..4ec7bae9e7d
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/32-32_437.yaml
@@ -0,0 +1,13 @@
+repo_id:
+  - 'Qwen/Qwen1.5-7B-Chat'
+local_model_hub: 'path to your local model hub'
+warm_up: 3
+num_trials: 5
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '32-32'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)
diff --git a/python/llm/test/benchmark/igpu-perf/32-512_437.yaml b/python/llm/test/benchmark/igpu-perf/32-512_437.yaml
new file mode 100644
index 00000000000..2650ffeaad9
--- /dev/null
+++ b/python/llm/test/benchmark/igpu-perf/32-512_437.yaml
@@ -0,0 +1,13 @@
+repo_id:
+  - 'Qwen/Qwen1.5-7B-Chat'
+local_model_hub: 'path to your local model hub'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
+batch_size: 1 # default to 1
+in_out_pairs:
+  - '32-512'
+test_api:
+  - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory)
+cpu_embedding: True # whether put embedding to CPU (only available now for gpu win related test_api)