import gradio as gr
import torch
from transformers import LayoutLMv3Processor, LayoutLMv3ForQuestionAnswering

## This model's example has issues and cannot run.
def inference(img):
    # Load the processor and model; apply_ocr=False because the words and their
    # bounding boxes are supplied to the processor directly.
    pretrained_model_path = "layoutlmv3-base-mpdocvqa"
    processor = LayoutLMv3Processor.from_pretrained(pretrained_model_path, apply_ocr=False)
    model = LayoutLMv3ForQuestionAnswering.from_pretrained(pretrained_model_path)

    image = img.convert("RGB")

    # Placeholder inputs: a dummy question, a single "word" of context, and one
    # normalized (0-1000) bounding box per word, here covering the whole page.
    question = "Is this a question?"
    context = ["Example"]
    boxes = [[0, 0, 1000, 1000]]

    document_encoding = processor(image, question, context, boxes=boxes, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**document_encoding)

    # Decode the answer span between the highest-scoring start and end positions.
    start_idx = torch.argmax(outputs.start_logits, dim=1).item()
    end_idx = torch.argmax(outputs.end_logits, dim=1).item()
    input_ids = document_encoding["input_ids"][0]
    answer = processor.tokenizer.decode(input_ids[start_idx : end_idx + 1]).strip()
    return answer

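
# Optional helper (a sketch, not used by `inference` above): produce real words
# and boxes with pytesseract instead of the placeholder context/boxes. This
# assumes pytesseract and the Tesseract binary are installed; LayoutLMv3 expects
# boxes normalized to a 0-1000 coordinate space.
def ocr_words_and_boxes(image):
    import pytesseract  # imported locally so the demo still runs without Tesseract

    width, height = image.size
    data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)
    words, boxes = [], []
    for text, x, y, w, h in zip(data["text"], data["left"], data["top"],
                                data["width"], data["height"]):
        if not text.strip():
            continue  # skip empty OCR tokens
        words.append(text)
        boxes.append([
            int(1000 * x / width),
            int(1000 * y / height),
            int(1000 * (x + w) / width),
            int(1000 * (y + h) / height),
        ])
    return words, boxes
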
title = "layoutlmv3-base-mpdocvqa"
description = "Gradio demo of layoutlmv3-base-mpdocvqa for visual question answering."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center>"
examples = [['example_cat.jpg'], ['Masahiro.png']]

demo = gr.Interface(
    fn=inference,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
    article=article,
    examples=examples,
)

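# Quick local check (a sketch): call the handler directly instead of launching
# the UI, assuming example_cat.jpg from the examples list is available locally.
#
#     from PIL import Image
#     print(inference(Image.open("example_cat.jpg")))
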
demo.launch(server_name="0.0.0.0")