import torch
from PIL import Image
import gradio as gr
from lavis.models import load_model_and_preprocess
from gradio.themes.utils import sizes

# Custom theme: square corners and a blue accent for labels and primary buttons.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the BLIP image-text matching model (large) together with its image and text processors.
model, vis_processors, text_processors = load_model_and_preprocess(
    "blip_image_text_matching", "large", device=device, is_eval=True
)


def image_text_match_compute(image, text):
    """Return the probability that the image and the text match, using the ITM head."""
    raw_image = Image.open(image).convert("RGB")
    img = vis_processors["eval"](raw_image).unsqueeze(0).to(device)
    txt = text_processors["eval"](text)
    itm_output = model({"image": img, "text_input": txt}, match_head="itm")
    itm_scores = torch.nn.functional.softmax(itm_output, dim=1)
    return f'The image and text are matched with a probability of {itm_scores[:, 1].item():.3%}'


with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
    gr.Markdown("""
Image-Text Similarity Computation
""")
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Image", type="filepath")
            text = gr.Textbox(label="Text")
            with gr.Row():
                button = gr.Button("Submit", variant="primary")
        box2 = gr.Textbox(label="Result")
    button.click(fn=image_text_match_compute, inputs=[image, text], outputs=box2)
    examples = gr.Examples(examples=[['merlion.png', 'merlion in Singapore']],
                           inputs=[image, text], label="Examples")

if __name__ == "__main__":
    demo.queue().launch(server_name="0.0.0.0")