Update config.yaml (#11208)
* update config.yaml

* fix

* minor

* style
hkvision authored Jun 4, 2024
1 parent ac3d53f · commit f936641
Showing 1 changed file with 6 additions and 7 deletions.
python/llm/dev/benchmark/all-in-one/config.yaml
@@ -12,12 +12,11 @@ in_out_pairs:
   - '32-32'
   - '1024-128'
 test_api:
-  - "transformer_int4_gpu" # on Intel GPU, transformer-like API, (qtype=int4)
-  # - "transformer_int4_gpu_win" # on Intel GPU for Windows, transformer-like API, (qtype=int4)
-  # - "transformer_int4_fp16_gpu" # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
+  - "transformer_int4_fp16_gpu" # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
   # - "transformer_int4_fp16_gpu_win" # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp16)
+  # - "transformer_int4_gpu" # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp32)
+  # - "transformer_int4_gpu_win" # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp32)
   # - "transformer_int4_loadlowbit_gpu_win" # on Intel GPU for Windows, transformer-like API, (qtype=int4), use load_low_bit API. Please make sure you have used the save.py to save the converted low bit model
   # - "ipex_fp16_gpu" # on Intel GPU, use native transformers API, (dtype=fp16)
   # - "bigdl_fp16_gpu" # on Intel GPU, use ipex-llm transformers API, (dtype=fp16), (qtype=fp16)
   # - "optimize_model_gpu" # on Intel GPU, can optimize any pytorch models include transformer model
   # - "deepspeed_optimize_model_gpu" # on Intel GPU, deepspeed autotp inference
@@ -35,8 +34,8 @@ test_api:
# - "deepspeed_transformer_int4_cpu" # on Intel CPU, deepspeed autotp inference
# - "transformer_int4_fp16_lookahead_gpu" # on Intel GPU, transformer-like API, with lookahead, (qtype=int4), (dtype=fp16)
cpu_embedding: False # whether put embedding to CPU
streaming: False # whether output in streaming way (only avaiable now for gpu win related test_api)
use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only avaiable now for "pipeline_parallel_gpu" test_api)
n_gpu: 2 # number of GPUs to use (only avaiable now for "pipeline_parallel_gpu" test_api)
streaming: False # whether output in streaming way (only available now for gpu win related test_api)
use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
n_gpu: 2 # number of GPUs to use (only available now for "pipeline_parallel_gpu" test_api)
lookahead: 3
max_matching_ngram_size: 2
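
For reference, here is a minimal sketch of the touched options as they read after this commit (values taken from the added and context lines above; the full config.yaml contains further keys, such as the model list, that this diff does not show):

in_out_pairs:
  - '32-32'
  - '1024-128'
test_api:
  - "transformer_int4_fp16_gpu"   # int4 weights with fp16 compute on Intel GPU, now the default
cpu_embedding: False              # whether put embedding to CPU
streaming: False                  # only available for gpu win related test_api
use_fp16_torch_dtype: True        # only takes effect for the "pipeline_parallel_gpu" test_api
n_gpu: 2                          # only takes effect for the "pipeline_parallel_gpu" test_api
lookahead: 3
max_matching_ngram_size: 2

Assuming the benchmark keeps its usual entry point (a run.py alongside this config.yaml, which is not shown in this diff), the updated settings would be picked up by running python run.py from python/llm/dev/benchmark/all-in-one/.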
