From e1ebbd87633108c74b78c5a5644e4e0c6069c29f Mon Sep 17 00:00:00 2001
From: Lilac09 <74996885+Zhengjin-Wang@users.noreply.github.com>
Date: Tue, 31 Oct 2023 16:35:18 +0800
Subject: [PATCH] add tools into previously built images (#9317)

* modify Dockerfile
* manually build
* modify Dockerfile
* add chat.py into inference-xpu
* add benchmark into inference-cpu
* manually build
* add benchmark into inference-cpu
* add benchmark into inference-cpu
* add benchmark into inference-cpu
* add chat.py into inference-xpu
* add chat.py into inference-xpu
* change ADD to COPY in dockerfile
* fix dependency issue
* temporarily remove run-spr in llm-cpu
* temporarily remove run-spr in llm-cpu
---
 docker/llm/inference/xpu/docker/Dockerfile |   7 +-
 docker/llm/inference/xpu/docker/chat.py    | 102 +++++++++++++++++++++
 docker/llm/serving/cpu/docker/Dockerfile   |   3 +-
 docker/llm/serving/xpu/docker/Dockerfile   |   3 +-
 4 files changed, 110 insertions(+), 5 deletions(-)
 create mode 100644 docker/llm/inference/xpu/docker/chat.py

diff --git a/docker/llm/inference/xpu/docker/Dockerfile b/docker/llm/inference/xpu/docker/Dockerfile
index 92dc893bc2f..2b705b3e5a5 100644
--- a/docker/llm/inference/xpu/docker/Dockerfile
+++ b/docker/llm/inference/xpu/docker/Dockerfile
@@ -4,6 +4,9 @@ ARG http_proxy
 ARG https_proxy
 
 ENV TZ=Asia/Shanghai
+ENV PYTHONUNBUFFERED=1
+
+COPY chat.py /llm/chat.py
 
 # Disable pip's cache behavior
 ARG PIP_NO_CACHE_DIR=false
@@ -33,4 +36,6 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
     pip install --pre --upgrade bigdl-llm[xpu] -f https://developer.intel.com/ipex-whl-stable-xpu && \
     # Install opencl-related repos
     apt-get update && \
-    apt-get install -y intel-opencl-icd intel-level-zero-gpu level-zero level-zero-dev
+    apt-get install -y intel-opencl-icd intel-level-zero-gpu level-zero level-zero-dev && \
+    # Install the library required by chat.py
+    pip install --upgrade colorama
diff --git a/docker/llm/inference/xpu/docker/chat.py b/docker/llm/inference/xpu/docker/chat.py
new file mode 100644
index 00000000000..b40c5f42fda
--- /dev/null
+++ b/docker/llm/inference/xpu/docker/chat.py
@@ -0,0 +1,102 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import intel_extension_for_pytorch as ipex
+import torch
+import argparse
+import sys
+# todo: support more model classes
+from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, AutoConfig
+from transformers import TextIteratorStreamer
+from transformers.tools.agents import StopSequenceCriteria
+from transformers.generation.stopping_criteria import StoppingCriteriaList
+from colorama import Fore
+from bigdl.llm import optimize_model
+SYSTEM_PROMPT = "A chat between a curious human and an artificial intelligence assistant. \
+The assistant gives helpful, detailed, and polite answers to the human's questions."
+HUMAN_ID = "<human>"
+BOT_ID = "<bot>"
+# chat_history is formatted as [(input_str, output_str)]
+def format_prompt(input_str,
+                  chat_history):
+    prompt = [f"{SYSTEM_PROMPT}\n"]
+    for history_input_str, history_output_str in chat_history:
+        prompt.append(f"{HUMAN_ID} {history_input_str}\n{BOT_ID} {history_output_str}\n")
+    prompt.append(f"{HUMAN_ID} {input_str}\n{BOT_ID} ")
+    return "".join(prompt)
+def stream_chat(model,
+                tokenizer,
+                stopping_criteria,
+                input_str,
+                chat_history):
+    prompt = format_prompt(input_str, chat_history)
+    # print(prompt)
+    input_ids = tokenizer([prompt], return_tensors="pt").to('xpu')
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(input_ids, streamer=streamer, max_new_tokens=512, stopping_criteria=stopping_criteria)
+    from threading import Thread
+    # to ensure non-blocking access to the generated text, generation should run in a separate thread
+    thread = Thread(target=model.generate, kwargs=generate_kwargs)
+    thread.start()
+    output_str = []
+    print(Fore.BLUE+"BigDL-LLM: "+Fore.RESET, end="")
+    for partial_output_str in streamer:
+        output_str.append(partial_output_str)
+        # remove the trailing HUMAN_ID if it exists
+        print(partial_output_str.replace(f"{HUMAN_ID}", ""), end="")
+    chat_history.append((input_str, "".join(output_str).replace(f"{HUMAN_ID}", "").rstrip()))
+def auto_select_model(model_name):
+    try:
+        try:
+            model = AutoModelForCausalLM.from_pretrained(model_name,
+                                                         low_cpu_mem_usage=True,
+                                                         torch_dtype="auto",
+                                                         trust_remote_code=True,
+                                                         use_cache=True)
+        except:
+            model = AutoModel.from_pretrained(model_name,
+                                              low_cpu_mem_usage=True,
+                                              torch_dtype="auto",
+                                              trust_remote_code=True,
+                                              use_cache=True)
+    except:
+        print("Sorry, the model you entered is not supported in installer.")
+        sys.exit()
+
+    return model
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--model-path", type=str, help="path to an llm")
+    args = parser.parse_args()
+    model_path = args.model_path
+
+    model = auto_select_model(model_path)
+    model = optimize_model(model)
+    model = model.to('xpu')
+    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(HUMAN_ID, tokenizer)])
+    chat_history = []
+    while True:
+        with torch.inference_mode():
+            user_input = input(Fore.GREEN+"\nHuman: "+Fore.RESET)
+            if user_input == "stop": # stop the conversation when the user inputs "stop"
+                break
+            stream_chat(model=model,
+                        tokenizer=tokenizer,
+                        stopping_criteria=stopping_criteria,
+                        input_str=user_input,
+                        chat_history=chat_history)
+
\ No newline at end of file
diff --git a/docker/llm/serving/cpu/docker/Dockerfile b/docker/llm/serving/cpu/docker/Dockerfile
index 8af0e3b8bbf..1c829bb9c2a 100644
--- a/docker/llm/serving/cpu/docker/Dockerfile
+++ b/docker/llm/serving/cpu/docker/Dockerfile
@@ -10,8 +10,7 @@ ARG PIP_NO_CACHE_DIR=false
 COPY ./entrypoint.sh /opt/entrypoint.sh
 ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /sbin/tini
 # Install Serving Dependencies
-RUN mkdir /llm && \
-    cd /llm && \
+RUN cd /llm && \
     git clone https://github.com/analytics-zoo/FastChat.git && \
     cd FastChat && \
     git checkout dev-2023-09-22 && \
diff --git a/docker/llm/serving/xpu/docker/Dockerfile b/docker/llm/serving/xpu/docker/Dockerfile
index 28bca6fc3eb..2dc57e72a70 100644
--- a/docker/llm/serving/xpu/docker/Dockerfile
+++ b/docker/llm/serving/xpu/docker/Dockerfile
@@ -7,8 +7,7 @@ ARG https_proxy
 ARG PIP_NO_CACHE_DIR=false
 
 # Install Serving Dependencies
-RUN mkdir /llm && \
-    cd /llm && \
+RUN cd /llm && \
     git clone https://github.com/analytics-zoo/FastChat.git && \
     cd FastChat && \
     git checkout dev-2023-09-22 && \
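
For context, a minimal sketch of how the chat.py bundled by this patch could be exercised once the inference-xpu image is built; the image tag, host model directory, and model folder name below are placeholders, not part of this patch:

    # map the Intel GPU into the container, mount a local model directory, and start the chat script
    docker run --rm -it --device=/dev/dri \
        -v /path/to/models:/llm/models \
        <your-inference-xpu-image> \
        python /llm/chat.py --model-path /llm/models/<model-dir>

Typing "stop" at the Human: prompt ends the session, matching the loop at the bottom of chat.py.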