import gradio as gr
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM


# Hub id of the GIT captioning model fine-tuned on COCO
# (the full id on the Hugging Face Hub is "microsoft/git-large-coco").
pretrained_model_path = "microsoft/git-large-coco"

# Load the processor and model once at startup; loading them inside
# inference() would re-deserialize the weights on every request.
processor = AutoProcessor.from_pretrained(pretrained_model_path)
model = AutoModelForCausalLM.from_pretrained(pretrained_model_path)


def inference(img):
    # Preprocess the PIL image into the model's expected pixel tensor.
    pixel_values = processor(images=img, return_tensors="pt").pixel_values

    # Greedy decoding, capped at 50 tokens.
    generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption
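
# A minimal sketch of a direct call, e.g. as a local sanity check before
# launching the demo. It uses one of the example images listed below; any
# RGB image opened with PIL works the same way:
#
#     caption = inference(Image.open("example_cat.jpg"))
#     print(caption)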

title = "Image to Text: git-large-coco"
description = "Gradio demo for git-large-coco image captioning. To use it, simply upload your image, or click one of the examples to load it."
article = "<p style='text-align: center'><a href='https://huggingface.co/microsoft/git-large-coco' target='_blank'>Model card: microsoft/git-large-coco</a></p>"
examples = [["example_cat.jpg"], ["Masahiro.png"]]

demo = gr.Interface(
    fn=inference,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(label="Caption"),
    title=title,
    description=description,
    article=article,
    examples=examples,
)

demo.launch()
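
# Note: launch() also accepts share=True, which creates a temporary public
# URL; handy when running this script locally rather than on Spaces:
#
#     demo.launch(share=True)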

