update

commit a31fb64f53
parent f6fd6a5042

@@ -0,0 +1,25 @@
# please visit https://github.com/xfyun/aiges/releases to get stable and suitable images.

FROM docker.io/library/python:3.8.9


RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
RUN sed -i 's|security.debian.org/debian-security|mirrors.ustc.edu.cn/debian-security|g' /etc/apt/sources.list


WORKDIR /home/user/app
RUN useradd -m -u 1000 user
RUN chown -R 1000.1000 /home/user


RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple/


RUN pip install --no-cache-dir pip==22.3.1
RUN --mount=target=requirements.txt,source=requirements.txt pip install --no-cache-dir -r requirements.txt


COPY --chown=1000 ./ /home/user/app


CMD ["python3", "app.py"]
@@ -1,5 +1,4 @@
import gradio as gr
from PIL import Image
from transformers import BeitImageProcessor, BeitForImageClassification


@@ -18,10 +17,11 @@ def inference(img):

    return model.config.id2label[predicted_class_idx]


title = "Image classification:beit-base-patch16-224-pt22k-ft22k"
description = "Gradio Demo for beit-base-patch16-224-pt22k-ft22k. To use it, simply upload your image, or click one of the examples to load them."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>"
examples=[['example_cat.jpg'],['Masahiro.png']]
examples = [['example_cat.jpg'], ['Masahiro.png']]

demo = gr.Interface(
    fn=inference,
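The hunks only show fragments of inference(); for orientation, here is a minimal sketch of how the imports and the return statement above plausibly fit together, assuming the microsoft/beit-base-patch16-224-pt22k-ft22k checkpoint (inferred from the title string) and a PIL image input — the actual app.py may load the model differently.

# Sketch only: reconstructs the inference path implied by the hunks above.
# The checkpoint name is an assumption based on the title string.
from PIL import Image
from transformers import BeitImageProcessor, BeitForImageClassification

MODEL_ID = "microsoft/beit-base-patch16-224-pt22k-ft22k"  # assumed checkpoint
processor = BeitImageProcessor.from_pretrained(MODEL_ID)
model = BeitForImageClassification.from_pretrained(MODEL_ID)

def inference(img: Image.Image) -> str:
    # Preprocess the PIL image into pixel_values, run the classifier,
    # and map the top logit back to a human-readable label.
    inputs = processor(images=img, return_tensors="pt")
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[predicted_class_idx]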
@@ -34,31 +34,3 @@ demo = gr.Interface(

demo.launch()


##


# model_dir = "hub/animegan2-pytorch-main"
# model_dir_weight = "hub/checkpoints/face_paint_512_v1.pt"
#
# model2 = torch.hub.load(
#     model_dir,
#     "generator",
#     pretrained=True,
#     progress=False,
#     source="local"
# )
# model1 = torch.load(model_dir_weight)
# face2paint = torch.hub.load(
#     model_dir, 'face2paint',
#     size=512, side_by_side=False,
#     source="local"
# )
#
# def inference(img, ver):
#     if ver == 'version 2 (🔺 robustness,🔻 stylization)':
#         out = face2paint(model2, img)
#     else:
#         out = face2paint(model1, img)
#     return out
#
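The hunk above truncates the gr.Interface(...) call; a hedged sketch of how the wiring plausibly continues with the gradio==3.21.0 pinned below. The inputs/outputs components are assumptions — only fn, the title/description/article/examples variables, and demo.launch() appear in the diff, and the other names are taken from the surrounding app.py.

# Sketch only: a plausible completion of the truncated gr.Interface(...) call.
import gradio as gr

demo = gr.Interface(
    fn=inference,                 # classifier defined earlier in app.py
    inputs=gr.Image(type="pil"),  # assumed: hand the function a PIL image
    outputs="text",               # assumed: inference() returns a label string
    title=title,
    description=description,
    article=article,
    examples=examples,
)

demo.launch()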
@@ -0,0 +1,2 @@
gradio==3.21.0
transformers==4.27.1