# AnimeGAN2 (animegan2-pytorch) Gradio demo app.
import gradio as gr
import torch
# Fetch the two AnimeGAN2 generator variants from torch.hub:
#   model2 — default v2 weights (more robust, less stylized)
#   model1 — face_paint_512_v1 weights (more stylized, less robust)
model2 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained=True,
    progress=False,
)

model1 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained="face_paint_512_v1",
)

# Callable that runs a PIL image through a given generator at 512px,
# returning the stylized image only (no side-by-side comparison).
face2paint = torch.hub.load(
    'AK391/animegan2-pytorch:main',
    'face2paint',
    size=512,
    side_by_side=False,
)
def inference(img, ver):
    """Stylize an input image with the generator selected by *ver*.

    Args:
        img: input image (PIL), as delivered by the Gradio Image component.
        ver: radio-button label; the exact version-2 label selects ``model2``,
            any other value falls back to ``model1``.

    Returns:
        The stylized PIL image produced by ``face2paint``.
    """
    chosen = model2 if ver == 'version 2 (🔺 robustness,🔻 stylization)' else model1
    return face2paint(chosen, img)
# UI title (Chinese: "anime style transfer").
title = "动漫风格迁移"

# Clickable example rows: (image path, version label) per row.
examples = [
    ['groot.jpeg', 'version 2 (🔺 robustness,🔻 stylization)'],
    ['gongyoo.jpeg', 'version 1 (🔺 stylization, 🔻 robustness)'],
]
# Build the web UI with the top-level component API (gr.Image / gr.Radio):
# the gr.inputs / gr.outputs namespaces were deprecated in Gradio 3.0 and
# removed in 4.0, and Radio's `default=` keyword became `value=`.
demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(type="pil"),
        gr.Radio(
            choices=[
                'version 1 (🔺 stylization, 🔻 robustness)',
                'version 2 (🔺 robustness,🔻 stylization)',
            ],
            value='version 2 (🔺 robustness,🔻 stylization)',
            label='version',
        ),
    ],
    outputs=gr.Image(type="pil"),
    title=title,
    examples=examples,
)

# Bind to all interfaces so the app is reachable from outside a container.
demo.launch(server_name="0.0.0.0", server_port=7022)