Build-Deploy-Actions
commit 1f2ed0073d
Git LFS attributes (.gitattributes)
@@ -0,0 +1,29 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
model.pth filter=lfs diff=lfs merge=lfs -text
model2.pth filter=lfs diff=lfs merge=lfs -text
Gitea Actions workflow (Build)
@@ -0,0 +1,47 @@
name: Build
run-name: ${{ github.actor }} is upgrading the release 🚀
on: [push]
env:
  REPOSITORY: ${{ github.repository }}
  COMMIT_ID: ${{ github.sha }}
jobs:
  Build-Deploy-Actions:
    runs-on: ubuntu-latest
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
      - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
      - name: Check out repository code
        uses: actions/checkout@v3
      - name: Setup Git LFS
        run: |
          git lfs install
          git lfs fetch
          git lfs checkout
      - name: List files in the repository
        run: |
          ls ${{ github.workspace }}
      - name: Docker Image Info
        id: image-info
        # ::set-output is deprecated on newer runners in favour of writing to $GITHUB_OUTPUT.
        run: |
          echo "::set-output name=image_name::$(echo $REPOSITORY | tr '[:upper:]' '[:lower:]')"
          echo "::set-output name=image_tag::${COMMIT_ID:0:10}"
      - name: Login to container registry
        uses: docker/login-action@v2
        with:
          registry: artifacts.iflytek.com
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build and push
        run: |
          docker version
          docker buildx build -t artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }} . --file ${{ github.workspace }}/Dockerfile --load
          docker push artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
          docker rmi artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
      - run: echo "🍏 This job's status is ${{ job.status }}."
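Note: the "Docker Image Info" step only lower-cases the repository name and truncates the commit SHA to ten characters before composing the image reference. A minimal Python sketch of that derivation; the repository and SHA values below are made up for illustration:

# Mirrors the shell logic of the "Docker Image Info" step (illustration only).
repository = "Example-Org/Informative-Drawings"              # hypothetical ${{ github.repository }}
commit_id = "1f2ed0073d0123456789abcdef0123456789abcdef"     # hypothetical full ${{ github.sha }}

image_name = repository.lower()    # tr '[:upper:]' '[:lower:]'
image_tag = commit_id[:10]         # ${COMMIT_ID:0:10}

print(f"artifacts.iflytek.com/docker-private/atp/{image_name}:{image_tag}")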
Dockerfile
@@ -0,0 +1,11 @@
#FROM python:3.8.13
FROM artifacts.iflytek.com/docker-private/atp/base_image_for_ailab:0.0.1

WORKDIR /app

COPY . /app

RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple
RUN pip install -r requirements.txt

CMD ["python", "app.py"]
Hugging Face Space configuration (README.md)
@@ -0,0 +1,12 @@
---
title: Informativedrawings
emoji: 📉
colorFrom: gray
colorTo: blue
sdk: gradio
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
@@ -0,0 +1,128 @@
import numpy as np
import torch
import torch.nn as nn
import gradio as gr
from PIL import Image
import torchvision.transforms as transforms
from gradio.themes.utils import sizes


# Gradio theme: square corners and blue accent colours.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

norm_layer = nn.InstanceNorm2d


class ResidualBlock(nn.Module):
    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()

        conv_block = [nn.ReflectionPad2d(1),
                      nn.Conv2d(in_features, in_features, 3),
                      norm_layer(in_features),
                      nn.ReLU(inplace=True),
                      nn.ReflectionPad2d(1),
                      nn.Conv2d(in_features, in_features, 3),
                      norm_layer(in_features)]

        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return x + self.conv_block(x)


class Generator(nn.Module):
    def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True):
        super(Generator, self).__init__()

        # Initial convolution block
        model0 = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, 64, 7),
                  norm_layer(64),
                  nn.ReLU(inplace=True)]
        self.model0 = nn.Sequential(*model0)

        # Downsampling
        model1 = []
        in_features = 64
        out_features = in_features * 2
        for _ in range(2):
            model1 += [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                       norm_layer(out_features),
                       nn.ReLU(inplace=True)]
            in_features = out_features
            out_features = in_features * 2
        self.model1 = nn.Sequential(*model1)

        # Residual blocks
        model2 = []
        for _ in range(n_residual_blocks):
            model2 += [ResidualBlock(in_features)]
        self.model2 = nn.Sequential(*model2)

        # Upsampling
        model3 = []
        out_features = in_features // 2
        for _ in range(2):
            model3 += [nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
                       norm_layer(out_features),
                       nn.ReLU(inplace=True)]
            in_features = out_features
            out_features = in_features // 2
        self.model3 = nn.Sequential(*model3)

        # Output layer
        model4 = [nn.ReflectionPad2d(3),
                  nn.Conv2d(64, output_nc, 7)]
        if sigmoid:
            model4 += [nn.Sigmoid()]

        self.model4 = nn.Sequential(*model4)

    def forward(self, x, cond=None):
        out = self.model0(x)
        out = self.model1(out)
        out = self.model2(out)
        out = self.model3(out)
        out = self.model4(out)

        return out


# Two copies of the generator, one per drawing style; the LFS-tracked weights are loaded on CPU.
model1 = Generator(3, 1, 3)
model1.load_state_dict(torch.load('model.pth', map_location=torch.device('cpu')))
model1.eval()

model2 = Generator(3, 1, 3)
model2.load_state_dict(torch.load('model2.pth', map_location=torch.device('cpu')))
model2.eval()


def predict(input_img, ver):
    # Resize the input image to 256 px and convert it to a single-image batch tensor.
    input_img = Image.open(input_img)
    transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC), transforms.ToTensor()])
    input_img = transform(input_img)
    input_img = torch.unsqueeze(input_img, 0)

    drawing = 0
    with torch.no_grad():
        if ver == '风格1':
            drawing = model2(input_img)[0].detach()
        else:
            drawing = model1(input_img)[0].detach()

    drawing = transforms.ToPILImage()(drawing)
    return drawing


# '风格1' / '风格2' are the two style choices shown in the UI ("Style 1" / "Style 2").
examples = [['cat.png', '风格1'], ['bridge.png', '风格1'], ['lizard.png', '风格2']]


iface = gr.Interface(predict, [gr.inputs.Image(type='filepath'),
                               gr.inputs.Radio(['风格1', '风格2'], type="value", default='风格1')],
                     gr.outputs.Image(type="pil"), examples=examples)

iface.launch(server_name="0.0.0.0")
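As a quick sanity check of the Generator wiring above, the sketch below feeds a random tensor through the network and confirms that a 3-channel RGB input maps to a 1-channel drawing of the same 256x256 size. It assumes the Generator class can be imported without side effects (as committed, importing app.py would also call iface.launch(), so the class would need to be factored out or the launch guarded by an if __name__ == "__main__" block) and that the LFS-tracked model.pth checkpoint is present.

import torch

# Assumes Generator is importable as described above.
net = Generator(3, 1, 3)   # 3 input channels, 1 output channel, 3 residual blocks
net.load_state_dict(torch.load('model.pth', map_location=torch.device('cpu')))
net.eval()

with torch.no_grad():
    x = torch.rand(1, 3, 256, 256)   # stand-in for a preprocessed photo batch
    y = net(x)

print(y.shape)   # torch.Size([1, 1, 256, 256]): one grayscale line drawing per input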
Three binary files added (contents not shown): two images of 145 KiB and 135 KiB, and one further binary file.
requirements.txt
@@ -0,0 +1,2 @@
torchvision
Pillow