# vit-gpt2-coco-en/app.py

import torch
import requests
from PIL import Image
from transformers import ViTFeatureExtractor, AutoTokenizer, VisionEncoderDecoderModel
import gradio as gr
from gradio.themes.utils import sizes
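# Custom Gradio theme: square corners with a blue-on-white accent palette.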
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)
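
# Load the ViT encoder + GPT-2 decoder captioning model (trained on COCO captions).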
loc = "ydshieh/vit-gpt2-coco-en"
feature_extractor = ViTFeatureExtractor.from_pretrained(loc)
tokenizer = AutoTokenizer.from_pretrained(loc)
model = VisionEncoderDecoderModel.from_pretrained(loc)
model.eval()
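
# Caption an input image with beam search and return the decoded text.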
def predict(image):
    pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values

    with torch.no_grad():
        output_ids = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True).sequences

    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    total_caption = ""
    for pred in preds:
        total_caption = total_caption + pred.strip()
        total_caption = total_caption + "\r\n"
    return total_caption
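
# Build the web UI: image input, caption text output, one bundled example, flagging disabled.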
demo = gr.Interface(
    fn=predict,
    inputs='image',
    outputs='text',
    theme=theme,
    css="footer {visibility: hidden}",
    allow_flagging="never",
    examples=['soccer.jpg'],
)
if __name__ == "__main__":
    demo.queue(concurrency_count=1).launch(server_name="0.0.0.0")