commit a733035 (1 parent: ead3989)
matatonic committed Apr 4, 2024
Showing 15 changed files with 362 additions and 130 deletions.
@@ -0,0 +1,10 @@
FROM python:3.11-slim

ADD https://github.com/THUDM/CogVLM/raw/main/openai_demo/openai_api.py /usr/src/
WORKDIR /usr/src

# reduced dependencies for smaller image size
RUN pip install --no-cache-dir "transformers>=4.36.2" "torch>=2.1.0" "torchvision>=0.16.2" "pydantic>=2.6.0" "fastapi>=0.109.0" "uvicorn>=0.27.0" "loguru~=0.7.2" "sse-starlette>=1.8.2" \
    "xformers>=0.0.22" "accelerate>=0.26.1" "pillow>=10.2.0" "timm>=0.9.12" einops sentencepiece protobuf bitsandbytes

CMD python openai_api.py
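A minimal sketch of building and running this image; the tag, port mapping, and GPU flag below are illustrative assumptions rather than part of the commit, and the port the CogVLM demo listens on may differ:

# build from the directory containing this Dockerfile (tag name is arbitrary)
docker build -t cogvlm-openai .
# expose the API; assumes the demo listens on 8000 inside the container
docker run --gpus all -p 8000:8000 cogvlm-openai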
@@ -0,0 +1,10 @@
FROM python:3.11-slim

RUN apt-get update && apt-get install -y git

RUN git clone https://github.com/01-ai/Yi /app
WORKDIR /app
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install --no-cache-dir loguru openai sse-starlette tiktoken

CMD python VL/openai_api.py
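Both Dockerfiles wrap demos that speak the OpenAI chat API. A sketch of exercising such a container from Python, assuming it listens on localhost:8000 and accepts the standard image_url content part; the base URL, API key, and model name here are assumptions, not values from the commit:

from openai import OpenAI

# base_url, api_key, and model are illustrative; the served model name
# depends on how the demo registers itself
client = OpenAI(base_url="http://localhost:8000/v1", api_key="sk-none")

response = client.chat.completions.create(
    model="Yi-VL-6B",  # hypothetical model name
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.jpg"}},
        ],
    }],
    max_tokens=256,
)
print(response.choices[0].message.content)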
@@ -0,0 +1,61 @@ | ||
|
||
print("deepseek is a WORK IN PROGRESS and doesn't work yet.") | ||
|
||
from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM | ||
from deepseek_vl.utils.io import load_pil_images | ||
|
||
# specify the path to the model | ||
# model_path = "deepseek-ai/deepseek-vl-7b-chat" | ||
|
||
class VisionQnA(VisionQnABase): | ||
model_name: str = "deepseek-vl" | ||
format: str = '' | ||
|
||
def __init__(self, model_id: str, device: str, extra_params = {}, format = None): | ||
super().__init__(model_id, device, extra_params, format) | ||
|
||
self.processor = VLChatProcessor.from_pretrained(model_id) | ||
self.model = MultiModalityCausalLM.from_pretrained(**self.params) | ||
|
||
print(f"Loaded on device: {self.model.device} with dtype: {self.model.dtype}") | ||
|
||
async def chat_with_images(self, messages: list[Message], max_tokens: int) -> str: | ||
# XXX WIP | ||
conversation = [ | ||
{ | ||
"role": "User", | ||
"content": "<image_placeholder>Describe each stage of this image.", | ||
"images": ["./images/training_pipelines.jpg"] | ||
}, | ||
{ | ||
"role": "Assistant", | ||
"content": "" | ||
} | ||
] | ||
|
||
# load images and prepare for inputs | ||
pil_images = load_pil_images(conversation) | ||
prepare_inputs = vl_chat_processor( | ||
conversations=conversation, | ||
images=pil_images, | ||
force_batchify=True | ||
).to(self.model.device) | ||
|
||
# run image encoder to get the image embeddings | ||
inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs) | ||
|
||
# run the model to get the response | ||
outputs = self.model.language_model.generate( | ||
inputs_embeds=inputs_embeds, | ||
attention_mask=prepare_inputs.attention_mask, | ||
pad_token_id=tokenizer.eos_token_id, | ||
bos_token_id=tokenizer.bos_token_id, | ||
eos_token_id=tokenizer.eos_token_id, | ||
max_new_tokens=512, | ||
do_sample=False, | ||
use_cache=True | ||
) | ||
|
||
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True) | ||
print(f"{prepare_inputs['sft_format'][0]}", answer) | ||
|
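The hardcoded conversation is exactly what the XXX WIP comment flags: the incoming messages are ignored. A sketch of how they might be mapped into DeepSeek-VL's conversation format, assuming each Message carries a role plus a list of text/image_url content parts; these field names are assumptions about the project's types, not its actual API:

# Hypothetical helper: convert OpenAI-style messages into the
# DeepSeek-VL conversation format used above.
def to_deepseek_conversation(messages):
    conversation = []
    for m in messages:
        role = "User" if m.role == "user" else "Assistant"
        text, images = "", []
        for part in m.content:
            if part.type == "image_url":
                images.append(part.image_url.url)
                # placeholder token marks where the image is inserted
                text = "<image_placeholder>" + text
            else:
                text += part.text
        conversation.append({"role": role, "content": text, "images": images})
    # DeepSeek-VL expects a trailing empty Assistant turn as the generation prompt
    conversation.append({"role": "Assistant", "content": ""})
    return conversation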