# image2text: Gradio demo that generates captions for uploaded images.
import cv2
import gradio as gr
from gradio.themes.utils import sizes
from PIL import Image
from transformers import pipeline

# Custom Gradio theme: square corners, blue text accents, and a
# white primary button with a blue border and a pale-blue hover fill.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

# Image-captioning pipeline, loaded once at import time (the model download
# and weight load happen here, not per request).
# NOTE(review): despite the "image-to-text" task name, this model generates
# captions rather than reading text out of the image.
image_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
|
def ocr(image):
    """Generate caption text for an input image.

    Despite the name, this performs image captioning (vit-gpt2), not OCR.
    The name is kept because it is wired into ``gr.Interface(fn=ocr, ...)``.

    Parameters
    ----------
    image : numpy.ndarray
        RGB image array as delivered by the Gradio ``'image'`` input
        component.

    Returns
    -------
    str
        One generated caption per line, each terminated by ``"\\r\\n"``.
    """
    inp = Image.fromarray(image.astype('uint8'), 'RGB')
    captions = image_to_text(inp)
    # ''.join is linear; the original loop built the string with repeated
    # += concatenation, which is quadratic in the number of captions.
    # The '' default guards against a result dict missing 'generated_text'
    # (the original .get() would have returned None and raised TypeError).
    return ''.join(c.get('generated_text', '') + '\r\n' for c in captions)
# Gradio UI: single image input -> generated caption text output.
demo = gr.Interface(fn=ocr,
                    inputs='image',
                    outputs='text',
                    title="image2text",
                    theme=theme,
                    # Example image expected next to this script — TODO confirm
                    # soccer.jpg ships with the app.
                    examples=['soccer.jpg'])
if __name__ == "__main__":
    # Queue requests with up to 10 handled concurrently, then start the server.
    # NOTE(review): queue(concurrency_count=...) was removed in Gradio 4
    # (replaced by default_concurrency_limit) — confirm the pinned Gradio
    # version before upgrading.
    demo.queue(concurrency_count=10).launch()