diff --git a/python/llm/dev/benchmark/all-in-one/config.yaml b/python/llm/dev/benchmark/all-in-one/config.yaml
index 447beab4b74..2f248dc969f 100644
--- a/python/llm/dev/benchmark/all-in-one/config.yaml
+++ b/python/llm/dev/benchmark/all-in-one/config.yaml
@@ -12,12 +12,11 @@ in_out_pairs:
   - '32-32'
   - '1024-128'
 test_api:
-  - "transformer_int4_gpu"                  # on Intel GPU, transformer-like API, (qtype=int4)
-  # - "transformer_int4_gpu_win"            # on Intel GPU for Windows, transformer-like API, (qtype=int4)
-  # - "transformer_int4_fp16_gpu"           # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
+  - "transformer_int4_fp16_gpu"             # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp16)
   # - "transformer_int4_fp16_gpu_win"       # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp16)
+  # - "transformer_int4_gpu"                # on Intel GPU, transformer-like API, (qtype=int4), (dtype=fp32)
+  # - "transformer_int4_gpu_win"            # on Intel GPU for Windows, transformer-like API, (qtype=int4), (dtype=fp32)
   # - "transformer_int4_loadlowbit_gpu_win" # on Intel GPU for Windows, transformer-like API, (qtype=int4), use load_low_bit API. Please make sure you have used the save.py to save the converted low bit model
-  # - "ipex_fp16_gpu"                       # on Intel GPU, use native transformers API, (dtype=fp16)
   # - "bigdl_fp16_gpu"                      # on Intel GPU, use ipex-llm transformers API, (dtype=fp16), (qtype=fp16)
   # - "optimize_model_gpu"                  # on Intel GPU, can optimize any pytorch models include transformer model
   # - "deepspeed_optimize_model_gpu"        # on Intel GPU, deepspeed autotp inference
@@ -35,8 +34,8 @@ test_api:
   # - "deepspeed_transformer_int4_cpu"      # on Intel CPU, deepspeed autotp inference
   # - "transformer_int4_fp16_lookahead_gpu" # on Intel GPU, transformer-like API, with lookahead, (qtype=int4), (dtype=fp16)
 cpu_embedding: False # whether put embedding to CPU
-streaming: False # whether output in streaming way (only avaiable now for gpu win related test_api)
-use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only avaiable now for "pipeline_parallel_gpu" test_api)
-n_gpu: 2 # number of GPUs to use (only avaiable now for "pipeline_parallel_gpu" test_api)
+streaming: False # whether to output in streaming way (currently only available for the gpu win related test_api values)
+use_fp16_torch_dtype: True # whether to use fp16 for non-linear layers (currently only available for the "pipeline_parallel_gpu" test_api)
+n_gpu: 2 # number of GPUs to use (currently only available for the "pipeline_parallel_gpu" test_api)
 lookahead: 3
 max_matching_ngram_size: 2
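
For context, a minimal sketch of how a harness might consume the keys this patch touches, assuming the config is loaded with PyYAML; `run_benchmark` below is a hypothetical stand-in for the real all-in-one runner, not its actual API:

```python
# Minimal sketch, assuming config.yaml is read with PyYAML.
# run_benchmark() is hypothetical, for illustration only.
import yaml

with open("config.yaml") as f:
    conf = yaml.safe_load(f)

def run_benchmark(api: str, in_out: str, conf: dict) -> None:
    # Hypothetical stand-in for the actual benchmark runner.
    print(f"test_api={api}, in_out_pair={in_out}, "
          f"streaming={conf['streaming']}, n_gpu={conf['n_gpu']}")

# With this change, the first uncommented entry is the fp16 variant,
# so the default run uses int4 weights with fp16 compute on Intel GPU.
for api in conf["test_api"]:
    for pair in conf["in_out_pairs"]:
        run_benchmark(api, pair, conf)
```

The behavioral effect of the patch is confined to which `test_api` entry is uncommented by default (the fp16 variant instead of the fp32 one); the remaining hunk only fixes the "avaiable" typo in three comments.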