ailab/blip-image-captioning-base/app.py

import torch
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
import gradio as gr

# Load the BLIP processor and captioning model; the model weights run in float16 on the GPU.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16).to("cuda")

def image2text(image):
    # Gradio passes the uploaded image as a numpy array; convert it to a PIL RGB image.
    inp = Image.fromarray(image.astype('uint8'), 'RGB')
    # Conditional captioning: the model completes this text prompt.
    text = "a photography of"
    inputs = processor(inp, text, return_tensors="pt").to("cuda", torch.float16)
    out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)

demo = gr.Interface(fn=image2text,
                    inputs='image',
                    outputs='text',
                    title="image2text",
                    examples=['soccer.jpg'])

if __name__ == "__main__":
    # queue(concurrency_count=...) is the Gradio 3.x queuing API; serve on all interfaces, port 7017.
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7017)