0.39.0: +aria, docker changes
matatonic committed Oct 10, 2024
1 parent 6d7e4be commit c80e1f2
Showing 11 changed files with 273 additions and 208 deletions.
17 changes: 3 additions & 14 deletions .github/workflows/build-docker.yml
@@ -133,19 +133,6 @@ jobs:
with:
images: ${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}

# Build and push the Docker image to GHCR for the main branch or specific tags
- name: Build and Push Docker Image
if: github.ref == 'refs/heads/main'
uses: docker/build-push-action@v4
with:
context: .
build-args: |
VERSION=alt
file: Dockerfile
push: true
tags: ${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:latest
labels: version=${{ github.run_id }}

# Build and push the Docker image to GHCR for the main branch or specific tags
- name: Build and Push Docker Image (dev)
if: github.ref == 'refs/heads/dev'
@@ -167,6 +154,8 @@ jobs:
VERSION=alt
file: Dockerfile
push: true
tags: ${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }}
tags: |
${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }}
${{ env.DOCKER_REGISTRY }}/${{ env.IMAGE_NAME }}:latest
labels: version=${{ github.run_id }}

12 changes: 9 additions & 3 deletions Dockerfile
@@ -5,9 +5,9 @@ RUN apt-get update && apt-get install -y git gcc \
RUN --mount=type=cache,target=/root/.cache/pip pip install --upgrade pip

WORKDIR /app
RUN git clone https://github.com/TIGER-AI-Lab/Mantis.git --single-branch /app/Mantis
RUN git clone https://github.com/togethercomputer/Dragonfly --single-branch /app/Dragonfly
RUN git clone https://github.com/baaivision/Emu3 --single-branch /app/Emu3
RUN git clone https://github.com/TIGER-AI-Lab/Mantis.git --single-branch /app/Mantis && \
git clone https://github.com/togethercomputer/Dragonfly --single-branch /app/Dragonfly && \
git clone https://github.com/baaivision/Emu3 --single-branch /app/Emu3

COPY requirements.txt .
ARG VERSION=latest
@@ -26,5 +26,11 @@ COPY *.py .
COPY backend /app/backend
COPY model_conf_tests.json .

ARG USER_ID
ARG GROUP_ID
RUN groupadd -g $GROUP_ID openedai && \
useradd -r -u $USER_ID -g $GROUP_ID -M -d /app openedai

USER openedai
ENV CLI_COMMAND="python vision.py"
CMD $CLI_COMMAND
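The new `USER_ID`/`GROUP_ID` build args feed the `groupadd`/`useradd` step so the in-container `openedai` user can match the invoking host user. A minimal sketch of a manual build under that assumption (the image tag is illustrative; the compose files below pass these args automatically via their `UID`/`GID` defaults):

```sh
# Match the container user to the host user; the tag is a placeholder.
docker build \
  --build-arg USER_ID=$(id -u) \
  --build-arg GROUP_ID=$(id -g) \
  -t openedai-vision:local .
```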
8 changes: 8 additions & 0 deletions README.md
@@ -105,6 +105,7 @@ Can't decide which to use? See the [OpenVLM Leaderboard](https://huggingface.co/
- - [ ] [InternVL-Chat-V1-5-AWQ](https://huggingface.co/OpenGVLab/InternVL-Chat-V1-5-AWQ) (won't gpu split yet)
- - [X] [Mini-InternVL-Chat-4B-V1-5](https://huggingface.co/OpenGVLab/Mini-InternVL-Chat-4B-V1-5) (alternate docker only)
- - [X] [Mini-InternVL-Chat-2B-V1-5](https://huggingface.co/OpenGVLab/Mini-InternVL-Chat-2B-V1-5)
- [X] [rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria)
- [X] [Salesforce](https://huggingface.co/Salesforce)
- - [X] [xgen-mm-phi3-mini-instruct-singleimage-r-v1.5](https://huggingface.co/Salesforce/xgen-mm-phi3-mini-instruct-singleimage-r-v1.5)
- - [X] [xgen-mm-phi3-mini-instruct-interleave-r-v1](https://huggingface.co/Salesforce/xgen-mm-phi3-mini-instruct-interleave-r-v1.5)
@@ -159,6 +160,13 @@ If you can't find your favorite model, you can [open a new issue](https://github

## Recent updates

Version 0.39.0

- new model support: rhymes-ai/Aria
- improved support for multi-image in various models.
- docker package: The latest release will now be tagged with `:latest`, rather than the latest commit.
- ⚠️ docker: docker will now run as a user instead of root. Your `hf_home` volume may need its ownership fixed; you can use this command: `sudo chown $(id -u):$(id -g) -R hf_home`

Version 0.38.2

- Fix: multi-image for ovis 1.6
56 changes: 56 additions & 0 deletions backend/aria.py
@@ -0,0 +1,56 @@
from transformers import AutoModelForCausalLM, AutoProcessor

from vision_qna import *

# rhymes-ai/Aria

class VisionQnA(VisionQnABase):
model_name: str = "aria"
format: str = "chatml"
visual_layers: List[str] = ["vision_tower", "multi_modal_projector"]

def __init__(self, model_id: str, device: str, device_map: str = 'auto', extra_params = {}, format = None):
super().__init__(model_id, device, device_map, extra_params, format)

self.processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=self.params.get('trust_remote_code', False))
self.model = AutoModelForCausalLM.from_pretrained(**self.params).eval()

self.eos_token = '<|im_end|>'

self.loaded_banner()

async def stream_chat_with_images(self, request: ImageChatRequest) -> AsyncGenerator[str, None]:
images, prompt = await chatml_prompt_from_messages(request.messages, img_tok = "<fim_prefix><|img|><fim_suffix>")

prompt = prompt.replace("<fim_suffix><fim_prefix>", "<fim_suffix>\n<fim_prefix>")#.replace('<|im_end|>', '<|im_end|>\n')

if len(images) < 1:
prompt = "<fim_prefix><|img|><fim_suffix>" + prompt
images = [await url_to_image(transparent_pixel_url)]

inputs = self.processor(images=images, text=prompt, return_tensors="pt")
inputs["pixel_values"] = inputs["pixel_values"].to(self.model.dtype)
inputs = inputs.to(self.model.device)

default_params = {
'max_new_tokens': 500,
'do_sample': False,
# 'temperature': 0.9, # random test failures, ex. OCR
'stop_strings': [self.eos_token],
}

params = self.get_generation_params(request, default_params=default_params)

generation_kwargs = dict(
tokenizer=self.processor.tokenizer,
**inputs,
**params,
)

for new_text in threaded_streaming_generator(generate=self.model.generate, tokenizer=self.processor.tokenizer, generation_kwargs=generation_kwargs):
end = new_text.find(self.eos_token)
if end == -1:
yield new_text
else:
yield new_text[:end]
break
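For orientation, a minimal client-side sketch of exercising the new Aria backend through the server's OpenAI-compatible chat API; the base URL, port, and API key here are assumptions, so adjust them to your deployment:

```python
# Hypothetical client call against a locally running openedai-vision server.
# base_url and api_key are assumptions; the model id matches the new backend.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:5006/v1", api_key="sk-none")

response = client.chat.completions.create(
    model="rhymes-ai/Aria",
    messages=[{
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": "https://images.freeimages.com/images/large-previews/e59/autumn-tree-1408307.jpg"}},
            {"type": "text", "text": "What is the subject of the image?"},
        ],
    }],
    max_tokens=100,
)
print(response.choices[0].message.content)
```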
3 changes: 3 additions & 0 deletions docker-compose.alt.yml
@@ -3,7 +3,10 @@ services:
build:
args:
- VERSION=alt
- USER_ID=${UID:-1000}
- GROUP_ID=${GID:-1000}
dockerfile: Dockerfile
user: ${UID:-1000}:${GID:-1000}
container_name: openedai-vision-alt
image: ghcr.io/matatonic/openedai-vision-alt
env_file: vision-alt.env # your settings go here
3 changes: 3 additions & 0 deletions docker-compose.yml
@@ -3,7 +3,10 @@ services:
build:
args:
- VERSION=latest
- USER_ID=${UID:-1000}
- GROUP_ID=${GID:-1000}
dockerfile: Dockerfile
user: ${UID:-1000}:${GID:-1000}
container_name: openedai-vision
image: ghcr.io/matatonic/openedai-vision
env_file: vision.env # your settings go here
1 change: 1 addition & 0 deletions model_conf_tests.json
@@ -114,6 +114,7 @@
["qnguyen3/nanoLLaVA-1.5", "-A", "flash_attention_2", "--device-map", "cuda:0"],
["qresearch/llama-3-vision-alpha-hf", "--device", "cuda:0", "--load-in-4bit"],
["qresearch/llama-3-vision-alpha-hf", "--device", "cuda:0"],
["rhymes-ai/Aria", "-A", "flash_attention_2"],
["togethercomputer/Llama-3-8B-Dragonfly-Med-v1", "--load-in-4bit"],
["togethercomputer/Llama-3-8B-Dragonfly-Med-v1"],
["togethercomputer/Llama-3-8B-Dragonfly-v1", "--load-in-4bit"],
4 changes: 4 additions & 0 deletions requirements.txt
@@ -60,3 +60,7 @@ mistral_common[opencv]

# got-ocr2
verovio

# Aria. needs to build a bunch and doesn't work without many extra packages
# BYOB, use it if you need it
#grouped_gemm
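For those who do want it, a hedged sketch of the manual install the comment alludes to (build prerequisites vary with your CUDA and torch versions, and compilation can take a while):

```sh
# grouped_gemm builds CUDA extensions from source at install time.
pip install grouped_gemm
```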
74 changes: 38 additions & 36 deletions test_api_model.py
@@ -89,13 +89,16 @@ def record_result(cmd_args, results, t, mem, note):
params['top_p'] = args.top_p

def generate_response(image_url, prompt):

messages = [{ "role": "system", "content": [{ 'type': 'text', 'text': args.system_prompt }] }] if args.system_prompt else []
messages.extend([
{ "role": "user", "content": [
{ "type": "image_url", "image_url": { "url": image_url } },
{ "type": "text", "text": prompt },
]}])

if isinstance(image_url, str):
image_url = [image_url]

content = []
for url in image_url:
content.extend([{ "type": "image_url", "image_url": { "url": url } }])
content.extend([{ "type": "text", "text": prompt }])
messages.extend([{ "role": "user", "content": content }])

response = client.chat.completions.create(model=args.openai_model, messages=messages, **params)
completion_tokens = 0
@@ -106,11 +109,15 @@ def generate_response(image_url, prompt):

def generate_stream_response(image_url, prompt):
messages = [{ "role": "system", "content": [{ 'type': 'text', 'text': args.system_prompt }] }] if args.system_prompt else []
messages.extend([
{ "role": "user", "content": [
{ "type": "image_url", "image_url": { "url": image_url } },
{ "type": "text", "text": prompt },
]}])

if isinstance(image_url, str):
image_url = [image_url]

content = []
for url in image_url:
content.extend([{ "type": "image_url", "image_url": { "url": url } }])
content.extend([{ "type": "text", "text": prompt }])
messages.extend([{ "role": "user", "content": content }])

response = client.chat.completions.create(model=args.openai_model, messages=messages, **params, stream=True)
answer = ''
@@ -129,30 +136,30 @@ def generate_stream_response(image_url, prompt):
### Single round
timing = []

def single_test(url, question, label, generator=generate_response):
def single_test(url, question, right_answer, label, generator=generate_response):
tps_time = time.time()
answer, tok = generator(url, question)
tps_time = time.time() - tps_time
correct = name in answer.lower()
correct = right_answer in answer.lower()
results.extend([correct])
if not correct:
print(f"{name}[{label}]: fail, got: {answer}")
print(f"{right_answer}[{label}]: {red_fail}, got: {answer}")
#if args.abort_on_fail:
# break
else:
print(f"{name}[{label}]: pass{', got: ' + answer if args.verbose else ''}")
print(f"{right_answer}[{label}]: {green_pass}{', got: ' + answer if args.verbose else ''}")
if tok > 1:
timing.extend([(tok, tps_time)])

test_time = time.time()

# url tests
for name, url in urls.items():
single_test(url, "What is the subject of the image?", "url", generate_response)
single_test(url, "What is the subject of the image?", name, "url", generate_response)

data_url = data_url_from_url(url)
single_test(data_url, "What is the subject of the image?", "data", generate_response)
single_test(data_url, "What is the subject of the image?", "data_stream", generate_stream_response)
single_test(data_url, "What is the subject of the image?", name, "data", generate_response)
single_test(data_url, "What is the subject of the image?", name, "data_stream", generate_stream_response)


## OCR tests
Expand All @@ -162,31 +169,26 @@ def single_test(url, question, label, generator=generate_response):
}
for name, question in quality_urls.items():
prompt, data_url = question
single_test(data_url, prompt, "quality", generate_stream_response)
single_test(data_url, prompt, name, "quality", generate_stream_response)

# No image tests
no_image = {
'5': 'In the sequence of numbers: 1, 2, 3, 4, ... What number comes next after 4?'
'5': 'In the sequence of numbers: 1, 2, 3, 4, ... What number comes next after 4? Answer only the number.'
}

def no_image_response(prompt):
messages = [{ "role": "system", "content": [{ 'type': 'text', 'text': args.system_prompt }] }] if args.system_prompt else []
messages.extend([{ "role": "user", "content": prompt }])
for name, prompt in no_image.items():
single_test([], prompt, name, 'no_img', generate_response)

response = client.chat.completions.create(model=args.openai_model, messages=messages, **params, max_tokens=5)
answer = response.choices[0].message.content
return answer
# Multi-image test
multi_image = {
"water": ("What natural element is common in both images?",
[ 'https://images.freeimages.com/images/large-previews/e59/autumn-tree-1408307.jpg',
'https://images.freeimages.com/images/large-previews/242/waterfall-1537490.jpg'])
}

for name, prompt in no_image.items():
answer = no_image_response(prompt)
correct = True #name in answer.lower() # - no exceptions is enough.
results.extend([correct])
if not correct:
print(f"{name}[no_img]: fail, got: {answer}")
if args.abort_on_fail:
break
else:
print(f"{name}[no_img]: pass{', got: ' + answer if args.verbose else ''}")
for name, question in multi_image.items():
prompt, data_url = question
single_test(data_url, prompt, name, "multi-image", generate_stream_response)

test_time = time.time() - test_time
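The refactored helpers accept either a single URL string or a list, so the multi-image case above expands into one user message whose content array carries several `image_url` parts before the text part. A sketch of the payload it builds:

```python
# Shape of the messages payload for the two-image "water" test case above.
messages = [{
    "role": "user",
    "content": [
        {"type": "image_url", "image_url": {"url": "https://images.freeimages.com/images/large-previews/e59/autumn-tree-1408307.jpg"}},
        {"type": "image_url", "image_url": {"url": "https://images.freeimages.com/images/large-previews/242/waterfall-1537490.jpg"}},
        {"type": "text", "text": "What natural element is common in both images?"},
    ],
}]
```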
