add
Build-Deploy-Actions Details

This commit is contained in:
songw 2023-07-10 18:04:28 +08:00
parent 5bd1c47b1c
commit 88fb11bfd9
4 changed files with 189 additions and 0 deletions

View File

@ -0,0 +1,47 @@
name: Build
run-name: ${{ github.actor }} is upgrade release 🚀
on: [push]
env:
  REPOSITORY: ${{ github.repository }}
  COMMIT_ID: ${{ github.sha }}
jobs:
  Build-Deploy-Actions:
    runs-on: ubuntu-latest
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
      - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
      - name: Check out repository code
        uses: actions/checkout@v3
      - name: Setup Git LFS
        run: |
          git lfs install
          git lfs fetch
          git lfs checkout
      - name: List files in the repository
        run: |
          ls ${{ github.workspace }}
      - name: Docker Image Info
        id: image-info
        # Derive image name (lowercased repo) and tag (10-char commit SHA).
        # Fix: `::set-output` workflow commands are deprecated/removed;
        # write to $GITHUB_OUTPUT instead. Quote expansions defensively.
        run: |
          echo "image_name=$(echo "$REPOSITORY" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT"
          echo "image_tag=${COMMIT_ID:0:10}" >> "$GITHUB_OUTPUT"
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: artifacts.iflytek.com
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build and push
        run: |
          docker version
          docker buildx build -t artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }} . --file ${{ github.workspace }}/Dockerfile --load
          docker push artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
          docker rmi artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
      - run: echo "🍏 This job's status is ${{ job.status }}."

10
Dockerfile Normal file
View File

@ -0,0 +1,10 @@
FROM python:3.10.6
WORKDIR /app
# Copy only the dependency manifest first so the pip-install layer is cached
# and not rebuilt on every application-code change.
COPY requirements.txt /app/
RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple
# --no-cache-dir keeps the pip wheel cache out of the image layer.
RUN pip install --no-cache-dir -r requirements.txt
# Application code changes frequently; copy it after dependencies are installed.
COPY . /app
CMD ["python", "app.py"]

124
app.py Normal file
View File

@ -0,0 +1,124 @@
#!/usr/bin/env python
from __future__ import annotations
import os
import random
import tempfile
import gradio as gr
import imageio
import numpy as np
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from gradio.themes.utils import sizes
# Gradio theme: default theme with square corners and a custom accent palette.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color = '#4D63FF',
    block_title_text_color = '#4D63FF',
    button_primary_text_color = '#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)
# Hide the default Gradio footer in the rendered page.
css = "footer {visibility: hidden}"
# Upper bound on frames per generated video; overridable via environment.
MAX_NUM_FRAMES = int(os.getenv('MAX_NUM_FRAMES', '200'))
# Default frame count, clamped to the maximum above.
DEFAULT_NUM_FRAMES = min(MAX_NUM_FRAMES,
                         int(os.getenv('DEFAULT_NUM_FRAMES', '16')))
# Load the text-to-video diffusion pipeline in half precision.
# NOTE(review): this runs at import time and downloads model weights on the
# first run — the app will not start without network access to the model hub.
pipe = DiffusionPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b',
                                         torch_dtype=torch.float16,
                                         variant='fp16')
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()  # offload idle submodules to CPU to cut GPU memory
pipe.enable_vae_slicing()        # decode the VAE in slices to reduce peak memory
def to_video(frames: list[np.ndarray], fps: int) -> str:
    """Encode a list of RGB frames into a temporary MP4 file.

    Args:
        frames: Video frames as numpy arrays, in playback order.
        fps: Frames per second of the output video.

    Returns:
        Path of the written .mp4 file (caller is responsible for cleanup,
        since the file is created with delete=False).
    """
    out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
    # Fix: close the tempfile's descriptor immediately — the original leaked
    # it, and imageio reopens the file by path anyway.
    out_file.close()
    writer = imageio.get_writer(out_file.name, format='FFMPEG', fps=fps)
    try:
        for frame in frames:
            writer.append_data(frame)
    finally:
        # Fix: guarantee the writer is flushed/closed even if append_data raises.
        writer.close()
    return out_file.name
def generate(prompt: str, seed: int, num_frames: int,
             num_inference_steps: int) -> str:
    """Generate a video from a text prompt and return the MP4 path.

    Args:
        prompt: Text description of the desired video.
        seed: RNG seed; -1 picks a fresh random seed per call.
        num_frames: Number of frames to synthesize.
        num_inference_steps: Diffusion denoising steps.

    Returns:
        Filesystem path of the encoded video (8 fps).
    """
    # A seed of -1 means "non-reproducible": draw one at random instead.
    actual_seed = random.randint(0, 1000000) if seed == -1 else seed
    rng = torch.Generator().manual_seed(actual_seed)
    result = pipe(prompt,
                  num_inference_steps=num_inference_steps,
                  num_frames=num_frames,
                  generator=rng)
    return to_video(result.frames, 8)
# Pre-filled example inputs: [prompt, seed, num_frames, num_inference_steps].
examples = [
    ['An astronaut riding a horse.', 0, 16, 25],
    ['A panda eating bamboo on a rock.', 0, 16, 25],
    ['Spiderman is surfing.', 0, 16, 25],
]
# Gradio 3.x UI: prompt row + video output, with advanced sliders in an
# accordion. NOTE(review): the chained `.style()` calls are the Gradio 3 API
# and were removed in Gradio 4 — pinned via requirements.txt (gradio==3.23.0).
with gr.Blocks(theme=theme, css=css) as demo:
    # Page title (Chinese: "Synthesize video from text").
    gr.Markdown("""
    <div align='center' ><font size='60'>通过文本合成视频</font></div>
    """)
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id='prompt-container').style(equal_height=True):
                prompt = gr.Text(
                    label='Prompt',
                    show_label=False,
                    max_lines=1,
                    placeholder='输入提示',
                    elem_id='prompt-text-input').style(container=False)
                run_button = gr.Button('生成视频').style(
                    full_width=False)
        result = gr.Video(label='Result', show_label=False, elem_id='gallery')
    # Advanced options, collapsed by default.
    with gr.Accordion('高级选项', open=False):
        seed = gr.Slider(
            label='Seed',
            minimum=-1,
            maximum=1000000,
            step=1,
            value=-1,
            info='If set to -1, a different seed will be used each time.')
        num_frames = gr.Slider(
            label='Number of frames',
            minimum=16,
            maximum=MAX_NUM_FRAMES,
            step=1,
            value=16,
            info=
            'Note that the content of the video also changes when you change the number of frames.'
        )
        num_inference_steps = gr.Slider(label='Number of inference steps',
                                        minimum=10,
                                        maximum=50,
                                        step=1,
                                        value=25)
    # Input components, in the order `generate` expects its arguments.
    inputs = [
        prompt,
        seed,
        num_frames,
        num_inference_steps,
    ]
    # Examples are only pre-computed and cached when running on Spaces.
    gr.Examples(examples=examples,
                inputs=inputs,
                outputs=result,
                fn=generate,
                cache_examples=os.getenv('SYSTEM') == 'spaces',
                label="示例")
    # Both Enter-in-textbox and the button trigger generation.
    prompt.submit(fn=generate, inputs=inputs, outputs=result)
    run_button.click(fn=generate, inputs=inputs, outputs=result)
# Queue requests (max 15 waiting) and listen on all interfaces for Docker.
demo.queue(api_open=False, max_size=15).launch(server_name="0.0.0.0")

8
requirements.txt Normal file
View File

@ -0,0 +1,8 @@
accelerate==0.17.1
git+https://ghproxy.com/https://github.com/huggingface/diffusers@9dc8444
gradio==3.23.0
huggingface-hub==0.13.3
imageio[ffmpeg]==2.26.1
torch==2.0.0
torchvision==0.15.1
transformers==4.27.2