from transformers import ViltProcessor, ViltForQuestionAnswering
import requests
from PIL import Image
import gradio as gr
import torch

# Load the ViLT processor and the VQA model fine-tuned on the VQAv2 dataset.
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")


def vqa(image, question):
    # Gradio passes the image in as a numpy array; convert it to a PIL RGB image.
    inp = Image.fromarray(image.astype('uint8'), 'RGB')
    inputs = processor(inp, question, return_tensors="pt")

    # Forward pass; the predicted answer is the highest-scoring label.
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    idx = logits.argmax(-1).item()

    return model.config.id2label[idx]


# Image + question in, text answer out; one example prompt is preloaded.
demo = gr.Interface(fn=vqa,
                    inputs=['image', 'text'],
                    outputs='text',
                    title="vqa",
                    examples=[['soccer.jpg', 'how many people in the picture?']])

if __name__ == "__main__":
    # Queue up to 3 concurrent workers and serve on all interfaces at port 7023.
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7023)
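# A minimal local smoke test, kept as a comment (assumes a "soccer.jpg" file
# next to this script, matching the example above, and numpy installed):
#
#   import numpy as np
#   img = np.array(Image.open("soccer.jpg").convert("RGB"))
#   print(vqa(img, "how many people in the picture?"))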