from transformers import AutoProcessor, AutoModelForCausalLM
from PIL import Image
import gradio as gr
import torch

# Load the GIT model fine-tuned for visual question answering on VQAv2.
processor = AutoProcessor.from_pretrained("microsoft/git-large-vqav2")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-vqav2")


def vqa(image, question):
    # Gradio passes the image as a numpy array; convert it to a PIL RGB image.
    inp = Image.fromarray(image.astype("uint8"), "RGB")
    pixel_values = processor(images=inp, return_tensors="pt").pixel_values

    # Tokenize the question without special tokens, then prepend the CLS token
    # by hand, following the GIT-VQA usage in the transformers documentation.
    input_ids = processor(text=question, add_special_tokens=False).input_ids
    input_ids = [processor.tokenizer.cls_token_id] + input_ids
    input_ids = torch.tensor(input_ids).unsqueeze(0)  # add a batch dimension

    generated_ids = model.generate(
        pixel_values=pixel_values, input_ids=input_ids, max_length=50
    )
    # The decoded string contains the question followed by the generated
    # answer; return the first (and only) element of the batch.
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]


demo = gr.Interface(
    fn=vqa,
    inputs=["image", "text"],
    outputs="text",
    title="VQA",
    examples=[["soccer.jpg", "how many people in the picture?"]],
)

if __name__ == "__main__":
    # concurrency_count is the Gradio 3.x queue argument (Gradio 4 renamed it
    # to default_concurrency_limit).
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7024)
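
# --- Optional client-side check (a sketch, not part of the app above) ---
# A minimal way to query the running demo over HTTP, assuming the
# gradio_client package is installed and the server above is reachable on
# localhost:7024. The local file name "soccer.jpg" and the api_name
# "/predict" (Gradio's default endpoint for a single Interface) are
# assumptions here.
#
# from gradio_client import Client
#
# client = Client("http://localhost:7024/")
# answer = client.predict("soccer.jpg", "how many people in the picture?",
#                         api_name="/predict")
# print(answer)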