app.py is needed to fix the bug
commit 902abbf84c
@ -0,0 +1,25 @@
# please visit https://github.com/xfyun/aiges/releases to get stable and suitable images.
FROM docker.io/library/python:3.8.9

# Use the USTC mirrors for apt and pip to speed up builds.
RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
RUN sed -i 's|security.debian.org/debian-security|mirrors.ustc.edu.cn/debian-security|g' /etc/apt/sources.list

# Create a non-root user (UID 1000) and give it the app directory.
WORKDIR /home/user/app
RUN useradd -m -u 1000 user
RUN chown -R 1000:1000 /home/user

RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple/
RUN pip install --no-cache-dir pip==22.3.1

# Bind-mount requirements.txt from the build context for this step (requires BuildKit).
RUN --mount=target=requirements.txt,source=requirements.txt pip install --no-cache-dir -r requirements.txt

COPY --chown=1000 ./ /home/user/app

CMD ["python3", "app.py"]
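app.py (below) loads its checkpoint from the relative local path layoutlmv3-base-mpdocvqa, so that directory has to exist in the build context when the COPY step runs. A minimal sketch of one way to stage it, in case it is not already present (for example via the submodule added later in this commit), follows; the Hub repository id is an assumption, not something recorded in this commit.

# Sketch (assumption): pre-download the checkpoint into the directory app.py expects,
# so that COPY ./ /home/user/app bakes the weights into the image.
from transformers import LayoutLMv3Processor, LayoutLMv3ForQuestionAnswering

hub_id = "rubentito/layoutlmv3-base-mpdocvqa"  # assumed source repository on the Hugging Face Hub
local_dir = "layoutlmv3-base-mpdocvqa"         # path that app.py passes to from_pretrained

LayoutLMv3Processor.from_pretrained(hub_id, apply_ocr=False).save_pretrained(local_dir)
LayoutLMv3ForQuestionAnswering.from_pretrained(hub_id).save_pretrained(local_dir)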
Binary file not shown (image, 4.2 MiB).
@ -0,0 +1,42 @@
import gradio as gr
import torch
from transformers import LayoutLMv3Processor, LayoutLMv3ForQuestionAnswering

# Note: the published example for this model has issues and cannot be run as-is.


def inference(img):
    # Load processor and model from the local checkpoint directory (reloaded on every call).
    pretrained_model_path = "layoutlmv3-base-mpdocvqa"
    processor = LayoutLMv3Processor.from_pretrained(pretrained_model_path, apply_ocr=False)
    model = LayoutLMv3ForQuestionAnswering.from_pretrained(pretrained_model_path)

    image = img.convert("RGB")
    # Placeholder question and context; with apply_ocr=False the words and one
    # bounding box per word must be supplied explicitly.
    question = "Is this a question?"
    context = ["Example"]
    boxes = [[0, 0, 1000, 1000]]  # example bounding box covering the whole image
    document_encoding = processor(image, question, context, boxes=boxes, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**document_encoding)

    # Decode the answer span between the predicted start and end token positions.
    start_idx = torch.argmax(outputs.start_logits, dim=1).item()
    end_idx = torch.argmax(outputs.end_logits, dim=1).item()
    input_tokens = document_encoding["input_ids"][0]
    answer = processor.tokenizer.decode(input_tokens[start_idx:end_idx + 1]).strip()
    return answer


title = "layoutlmv3-base-mpdocvqa"
description = "Gradio Demo for layoutlmv3-base-mpdocvqa. To use it, simply upload your image, or click one of the examples to load them."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>"
examples = [['example_cat.jpg'], ['Masahiro.png']]

demo = gr.Interface(
    fn=inference,
    inputs=[gr.Image(type="pil")],
    outputs=gr.Textbox(),
    title=title,
    description=description,
    article=article,
    examples=examples)

demo.launch()
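For a quick check of inference outside the web UI, a small driver like the sketch below could be used; it assumes the checkpoint directory and one of the bundled example images are present, and that inference is importable without side effects (e.g. with demo.launch() temporarily guarded behind if __name__ == "__main__":, which is not part of this commit).

# Hypothetical smoke test for inference(); the import guard and the choice of
# example image are assumptions, not part of this commit.
from PIL import Image

from app import inference

img = Image.open("example_cat.jpg")  # one of the example images referenced by app.py
print(inference(img))  # prints the decoded answer string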
Binary file not shown (image, 169 KiB).
@ -0,0 +1 @@
Subproject commit d340591382198ec7528c8c3474dd3e77f44f9eed
@ -0,0 +1,3 @@
gradio==3.21.0
transformers==4.27.1
torch==2.0.0