image_captioning_app.py
import gradio as gr
import numpy as np
from PIL import Image
from transformers import AutoProcessor, BlipForConditionalGeneration
# Load the pretrained processor and model
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
def caption_image(input_image: np.ndarray):
    # Convert the numpy array to a PIL Image and convert to RGB
    raw_image = Image.fromarray(input_image).convert('RGB')
    # Process the image
    inputs = processor(raw_image, return_tensors="pt")
    # Generate a caption for the image
    out = model.generate(**inputs, max_length=50)
    # Decode the generated tokens to text
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption
iface = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(),
    outputs="text",
    title="Image Captioning",
    description="This is a simple web app for generating captions for images using a trained model."
)
iface.launch()
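# Note: gr.Image() hands the uploaded image to caption_image as a numpy array
# (Gradio's default image type), which is why the function converts it with
# Image.fromarray before passing it to the BLIP processor. If you want a
# publicly shareable link, iface.launch(share=True) is one option.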