ailab/blip-vqa-base/app.py

import torch
from PIL import Image
from transformers import BlipProcessor, BlipForQuestionAnswering
import gradio as gr

# Load the BLIP VQA processor and model; weights are kept in float16 on the GPU.
processor = BlipProcessor.from_pretrained("ybelkada/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained(
    "ybelkada/blip-vqa-base", torch_dtype=torch.float16
).to("cuda")
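
# A device-agnostic variant (a sketch, assuming the app may also need to run
# without a GPU; float16 is poorly supported on CPU, so full precision is
# used there instead):
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   dtype = torch.float16 if device == "cuda" else torch.float32
#   model = BlipForQuestionAnswering.from_pretrained(
#       "ybelkada/blip-vqa-base", torch_dtype=dtype
#   ).to(device)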
def vqa(image, question):
    # Gradio passes the uploaded image as a NumPy array; convert it to a PIL RGB image.
    inp = Image.fromarray(image.astype("uint8"), "RGB")
    # Preprocess the image and tokenize the question, matching the model's device and dtype.
    inputs = processor(inp, question, return_tensors="pt").to("cuda", torch.float16)
    out = model.generate(**inputs)
    # Decode the generated token IDs into the answer string.
    return processor.decode(out[0], skip_special_tokens=True)
demo = gr.Interface(
    fn=vqa,
    inputs=["image", "text"],
    outputs="text",
    title="vqa",
    # The example row assumes soccer.jpg is present in the Space's working directory.
    examples=[["soccer.jpg", "how many people in the picture?"]],
)
if __name__ == "__main__":
    # Queue up to three concurrent requests (Gradio 3.x queue API) and serve on all interfaces.
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7021)
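
# Example client call (a sketch, assuming the app above is running locally on
# port 7021 and the gradio_client package is installed; the image argument is
# a path to a local file, and "/predict" is the default endpoint name Gradio
# assigns to an Interface):
#
#   from gradio_client import Client
#
#   client = Client("http://localhost:7021/")
#   answer = client.predict("soccer.jpg", "how many people in the picture?",
#                           api_name="/predict")
#   print(answer)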