# Gradio demo: object detection with table-transformer-detection
# (file stats from original listing: 50 lines, ~2.0 KiB, Python)
import torch
from PIL import Image

import gradio as gr
from transformers import AutoImageProcessor, TableTransformerForObjectDetection
def inference(img):
    """Detect tables in *img* and describe every detection as text.

    Args:
        img: A PIL image in any mode; it is converted to RGB before
            being fed to the model.

    Returns:
        str: One line per detection, e.g.
        "Detected table with confidence 0.998 at location [x0, y0, x1, y1]",
        joined with newlines, or an explicit "no table detected" message
        when nothing clears the confidence threshold.
    """
    # Local directory (or hub id) the processor/model are loaded from.
    pretrained_model_path = "table-transformer-detection"

    image = img.convert("RGB")

    # NOTE(review): loading the processor and model on every call is slow;
    # kept inside the function to preserve the original lazy behavior.
    image_processor = AutoImageProcessor.from_pretrained(pretrained_model_path)
    model = TableTransformerForObjectDetection.from_pretrained(pretrained_model_path)

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(**inputs)

    # Convert outputs (bounding boxes and class logits) to COCO API format.
    # PIL gives (width, height); the processor wants (height, width).
    target_sizes = torch.tensor([image.size[::-1]])
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=target_sizes
    )[0]

    # Bug fix: the original returned inside this loop, so only the FIRST
    # detection was ever reported (and None when there were none).
    # Collect a line per detection instead.
    detections = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        detections.append(
            f"Detected {model.config.id2label[label.item()]} with confidence "
            f"{round(score.item(), 3)} at location {box}"
        )

    if not detections:
        return "No table detected with confidence >= 0.9"
    return "\n".join(detections)
# (Removed commented-out `title`/`description` constants: they duplicated the
# Markdown header rendered inside the Blocks UI below.)

# Footer HTML. NOTE(review): this string was copied from another demo — it
# still links to the animegan2 repo and visitor badge, and it is not passed
# to the UI anywhere; confirm whether it should be updated or dropped.
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>"

# Sample inputs surfaced under the demo via gr.Examples.
examples = [["example_pdf.png"]]
with gr.Blocks() as demo:
    # Page header rendered above the widgets.
    gr.Markdown(
        """
        # object detection:table-transformer-detection
        Gradio Demo for table-transformer-detection. To use it, simply upload your image, or click one of the examples to load them.
        """)

    # Input image on the left, detection text on the right.
    with gr.Row():
        input_image = gr.Image(type="pil")
        detection_box = gr.Textbox()

    run_button = gr.Button("上传")
    run_button.click(inference, inputs=input_image, outputs=detection_box)

    # Clicking an example loads it into the image input.
    gr.Examples(examples, inputs=input_image)

demo.launch()