import gradio as gr
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

# Model weights/processor config directory (local path or hub id).
PRETRAINED_MODEL_PATH = "table-transformer-detection"

# Load once at import time: from_pretrained does expensive disk/network I/O,
# and the original reloaded both objects on every single request.
image_processor = AutoImageProcessor.from_pretrained(PRETRAINED_MODEL_PATH)
model = TableTransformerForObjectDetection.from_pretrained(PRETRAINED_MODEL_PATH)


def inference(img):
    """Run table detection on an uploaded image and describe every detection.

    Args:
        img: PIL.Image supplied by the Gradio ``Image`` component.

    Returns:
        str: one line per detection above the 0.9 confidence threshold
        (label, confidence, [x0, y0, x1, y1] box in pixels), or a short
        message when nothing is detected.
    """
    image = img.convert("RGB")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():  # inference only -- skip autograd bookkeeping
        outputs = model(**inputs)

    # Convert raw outputs (class logits + boxes) to COCO-style detections.
    # PIL's image.size is (width, height); post-processing wants (height, width).
    target_sizes = torch.tensor([image.size[::-1]])
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=target_sizes
    )[0]

    # BUG FIX: the original returned inside this loop, so at most the first
    # detection was ever reported (and None when there were no detections).
    detections = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(coord, 2) for coord in box.tolist()]
        detections.append(
            f"Detected {model.config.id2label[label.item()]} with confidence "
            f"{round(score.item(), 3)} at location {box}"
        )
    if not detections:
        return "No objects detected above the 0.9 confidence threshold."
    return "\n".join(detections)


title = "object detection:table-transformer-detection"
description = (
    "Gradio Demo for table-transformer-detection. To use it, simply upload "
    "your image, or click one of the examples to load them."
)
# NOTE(review): the article HTML was mangled in the original source (the
# string literal was torn across several lines, leaving only the link texts
# "Github Repo Pytorch" and "visitor badge"). Reconstructed minimally below;
# verify the actual URLs before deploying.
article = (
    "<p style='text-align: center'>"
    "<a href='https://github.com/microsoft/table-transformer' target='_blank'>"
    "Github Repo Pytorch</a> | visitor badge"
    "</p>"
)
examples = [["example_pdf.png"]]

# gr.inputs / gr.outputs are the Gradio 1.x/2.x namespaces and were removed
# in Gradio 3+; the top-level components are the supported spelling.
demo = gr.Interface(
    fn=inference,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
    article=article,
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()