diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index f7286079060..9420609d307 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -41,6 +41,8 @@
              'decapoda-research/llama-65b-hf','lmsys/vicuna-7b-v1.5',
              'lmsys/vicuna-13b-v1.3','lmsys/vicuna-33b-v1.3','project-baize/merged-baize-30b']
 
+LLAMA3_VISION_IDS = ['meta-llama/Llama-3.2-11B-Vision-Instruct']
+
 CHATGLM_IDS = ['THUDM/chatglm-6b', 'THUDM/chatglm2-6b', 'THUDM/chatglm3-6b']
 
 LLAVA_IDS = ['liuhaotian/llava-v1.5-7b']
@@ -770,6 +772,13 @@ def run_optimize_model_gpu(repo_id,
         model = optimize_model(model, low_bit=low_bit)
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
+    elif repo_id in LLAMA3_VISION_IDS:
+        from transformers import MllamaForConditionalGeneration
+        model = MllamaForConditionalGeneration.from_pretrained(model_path, trust_remote_code=True,
+                                                               low_cpu_mem_usage=True).eval()
+        model = optimize_model(model, low_bit=low_bit, modules_to_not_convert=["multi_modal_projector"])
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = model.to('xpu')
     else:
         model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype='auto', low_cpu_mem_usage=True,
                                                      trust_remote_code=True, use_cache=True).eval()
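
For reference, a minimal standalone sketch of what the new LLAMA3_VISION_IDS branch does outside the benchmark harness. The `MllamaForConditionalGeneration` load and the `optimize_model(..., modules_to_not_convert=["multi_modal_projector"])` call mirror the patch; the `AutoProcessor` usage, the `"sym_int4"` low-bit value, and the prompt format are assumptions based on the standard transformers Mllama API, not taken from run.py.

```python
# Minimal sketch (not part of the patch): load Llama-3.2-11B-Vision with
# transformers, quantize it with ipex-llm's optimize_model while keeping the
# multi_modal_projector in full precision, and move it to an Intel GPU.
from transformers import AutoProcessor, MllamaForConditionalGeneration

from ipex_llm import optimize_model

model_path = "meta-llama/Llama-3.2-11B-Vision-Instruct"

model = MllamaForConditionalGeneration.from_pretrained(
    model_path, trust_remote_code=True, low_cpu_mem_usage=True
).eval()

# Skip the projector that bridges the vision encoder and the language model,
# mirroring modules_to_not_convert in the patch. "sym_int4" is an assumed
# low_bit choice; the benchmark passes whatever low_bit the config specifies.
model = optimize_model(model, low_bit="sym_int4",
                       modules_to_not_convert=["multi_modal_projector"])
model = model.to("xpu")

processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
prompt = "<|image|><|begin_of_text|>Describe this image."

# Generation would then look roughly like this, given a PIL image:
#   import torch
#   from PIL import Image
#   image = Image.open("example.jpg")
#   inputs = processor(images=image, text=prompt, return_tensors="pt").to("xpu")
#   with torch.inference_mode():
#       output = model.generate(**inputs, max_new_tokens=32)
#   print(processor.decode(output[0], skip_special_tokens=True))
```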