add Build-Deploy-Actions

songw 2023-05-12 10:18:27 +08:00
parent ca4ef73215
commit 18bd6f743a
11 changed files with 457 additions and 0 deletions


@@ -0,0 +1,47 @@
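# Gitea Actions workflow: on every push, check out the repo (with Git LFS),
# derive an image name/tag from the repository name and commit SHA, then
# build and push a Docker image to the private registry.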
name: Build
run-name: ${{ github.actor }} is upgrading the release 🚀
on: [push]
env:
REPOSITORY: ${{ github.repository }}
COMMIT_ID: ${{ github.sha }}
jobs:
Build-Deploy-Actions:
runs-on: ubuntu-latest
steps:
- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
- run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
- name: Check out repository code
uses: actions/checkout@v3
      - name: Setup Git LFS
        run: |
          git lfs install
          git lfs fetch
          git lfs checkout
- name: List files in the repository
run: |
ls ${{ github.workspace }}
      - name: Docker Image Info
        id: image-info
        run: |
          # `::set-output` is deprecated; write step outputs to $GITHUB_OUTPUT instead.
          echo "image_name=$(echo "$REPOSITORY" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT"
          echo "image_tag=${COMMIT_ID:0:10}" >> "$GITHUB_OUTPUT"
      - name: Login to container registry
        uses: docker/login-action@v2
        with:
          registry: artifacts.iflytek.com
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
      - name: Build and push
        run: |
          docker version
          docker buildx build -t artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }} . --file ${{ github.workspace }}/Dockerfile --load
          docker push artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
          docker rmi artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
- run: echo "🍏 This job's status is ${{ job.status }}."

Dockerfile Normal file (+10 lines)

@@ -0,0 +1,10 @@
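# Python 3.11 base image; install dependencies via the USTC PyPI mirror, then run the Gradio app.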
FROM python:3.11
WORKDIR /app
COPY . /app
RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple
RUN pip install -r requirements.txt
CMD ["python", "app.py"]

app.py Normal file (+34 lines)

@@ -0,0 +1,34 @@
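# Entry point: a Gradio app with two tabs (text-to-3D and image-to-3D) sharing one Model instance.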
import os
import gradio as gr
import torch
from app_image_to_3d import create_demo as create_demo_image_to_3d
from app_text_to_3d import create_demo as create_demo_text_to_3d
from model import Model
from gradio.themes.utils import sizes
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
block_label_text_color = '#4D63FF',
block_title_text_color = '#4D63FF',
button_primary_text_color = '#4D63FF',
button_primary_background_fill='#FFFFFF',
button_primary_border_color='#4D63FF',
button_primary_background_fill_hover='#EDEFFF',
)
css = "footer {visibility: hidden}"
model = Model()
with gr.Blocks(theme=theme, css=css) as demo:
gr.Markdown("""
    <div align='center'><font size='60'>3D Model Generation</font></div>
""")
with gr.Tabs():
        with gr.Tab(label='Text to 3D'):
create_demo_text_to_3d(model)
        with gr.Tab(label='Image to 3D'):
create_demo_image_to_3d(model)
demo.queue().launch(server_name='0.0.0.0')

app_image_to_3d.py Normal file (+80 lines)

@@ -0,0 +1,80 @@
#!/usr/bin/env python
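# Image-to-3D tab: upload an image, optionally tweak seed/guidance/steps, and view the generated mesh.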
import pathlib
import shlex
import subprocess
import gradio as gr
from model import Model
from settings import CACHE_EXAMPLES, MAX_SEED
from utils import randomize_seed_fn
from gradio.themes.utils import sizes
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
block_label_text_color = '#4D63FF',
block_title_text_color = '#4D63FF',
button_primary_text_color = '#4D63FF',
button_primary_background_fill='#FFFFFF',
button_primary_border_color='#4D63FF',
button_primary_background_fill_hover='#EDEFFF',
)
def create_demo(model: Model) -> gr.Blocks:
examples = ['corgi.png']
def process_example_fn(image_path: str) -> str:
return model.run_image(image_path)
with gr.Blocks(theme=theme) as demo:
with gr.Box():
            image = gr.Image(label='Input image',
                             show_label=False,
                             type='filepath')
            run_button = gr.Button('Run')
            result = gr.Model3D(label='Result', show_label=False)
            with gr.Accordion('Advanced options', open=False):
seed = gr.Slider(label='Seed',
minimum=0,
maximum=MAX_SEED,
step=1,
value=0)
randomize_seed = gr.Checkbox(label='Randomize seed',
value=True)
guidance_scale = gr.Slider(label='Guidance scale',
minimum=1,
maximum=20,
step=0.1,
value=3.0)
num_inference_steps = gr.Slider(
label='Number of inference steps',
minimum=1,
maximum=100,
step=1,
value=64)
gr.Examples(examples=examples,
inputs=image,
outputs=result,
fn=process_example_fn,
cache_examples=CACHE_EXAMPLES,
label="例子")
inputs = [
image,
seed,
guidance_scale,
num_inference_steps,
]
run_button.click(
fn=randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
).then(
fn=model.run_image,
inputs=inputs,
outputs=result,
)
return demo

app_text_to_3d.py Normal file (+101 lines)

@@ -0,0 +1,101 @@
#!/usr/bin/env python
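# Text-to-3D tab: same layout as the image tab, driven by a text prompt.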
import gradio as gr
from model import Model
from settings import CACHE_EXAMPLES, MAX_SEED
from utils import randomize_seed_fn
from gradio.themes.utils import sizes
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
block_label_text_color = '#4D63FF',
block_title_text_color = '#4D63FF',
button_primary_text_color = '#4D63FF',
button_primary_background_fill='#FFFFFF',
button_primary_border_color='#4D63FF',
button_primary_background_fill_hover='#EDEFFF',
)
def create_demo(model: Model) -> gr.Blocks:
examples = [
'A chair that looks like an avocado',
'An airplane that looks like a banana',
'A spaceship',
'A birthday cupcake',
'A chair that looks like a tree',
'A green boot',
'A penguin',
'Ube ice cream cone',
'A bowl of vegetables',
]
def process_example_fn(prompt: str) -> str:
return model.run_text(prompt)
with gr.Blocks(theme=theme) as demo:
with gr.Box():
with gr.Row(elem_id='prompt-container'):
prompt = gr.Text(
                    label='Prompt',
show_label=False,
max_lines=1,
                    placeholder='Enter your prompt').style(container=False)
                run_button = gr.Button('Run').style(full_width=False)
            result = gr.Model3D(label='Result', show_label=False)
            with gr.Accordion('Advanced options', open=False):
seed = gr.Slider(label='Seed',
minimum=0,
maximum=MAX_SEED,
step=1,
value=0)
randomize_seed = gr.Checkbox(label='Randomize seed',
value=True)
guidance_scale = gr.Slider(label='Guidance scale',
minimum=1,
maximum=20,
step=0.1,
value=15.0)
num_inference_steps = gr.Slider(
label='Number of inference steps',
minimum=1,
maximum=100,
step=1,
value=64)
gr.Examples(examples=examples,
inputs=prompt,
outputs=result,
fn=process_example_fn,
cache_examples=CACHE_EXAMPLES,
label="例子")
inputs = [
prompt,
seed,
guidance_scale,
num_inference_steps,
]
prompt.submit(
fn=randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
).then(
fn=model.run_text,
inputs=inputs,
outputs=result,
)
run_button.click(
fn=randomize_seed_fn,
inputs=[seed, randomize_seed],
outputs=seed,
queue=False,
).then(
fn=model.run_text,
inputs=inputs,
outputs=result,
)
return demo

corgi.png Normal file (binary image, 24 KiB; not shown)

model.py Normal file (+148 lines)

@@ -0,0 +1,148 @@
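# Wraps the shap-e pipeline: sample a latent with the text or image model,
# decode it into a mesh, and export a .glb for gr.Model3D.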
import tempfile
import numpy as np
import torch
import trimesh
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.diffusion.sample import sample_latents
from shap_e.models.download import load_config, load_model
from shap_e.models.nn.camera import (DifferentiableCameraBatch,
DifferentiableProjectiveCamera)
from shap_e.models.transmitter.base import Transmitter, VectorDecoder
from shap_e.rendering.torch_mesh import TorchMesh
from shap_e.util.collections import AttrDict
from shap_e.util.image_util import load_image
# Copied from https://github.com/openai/shap-e/blob/d99cedaea18e0989e340163dbaeb4b109fa9e8ec/shap_e/util/notebooks.py#L15-L42
def create_pan_cameras(size: int,
device: torch.device) -> DifferentiableCameraBatch:
origins = []
xs = []
ys = []
zs = []
for theta in np.linspace(0, 2 * np.pi, num=20):
z = np.array([np.sin(theta), np.cos(theta), -0.5])
z /= np.sqrt(np.sum(z**2))
origin = -z * 4
x = np.array([np.cos(theta), -np.sin(theta), 0.0])
y = np.cross(z, x)
origins.append(origin)
xs.append(x)
ys.append(y)
zs.append(z)
return DifferentiableCameraBatch(
shape=(1, len(xs)),
flat_camera=DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(origins,
axis=0)).float().to(device),
x=torch.from_numpy(np.stack(xs, axis=0)).float().to(device),
y=torch.from_numpy(np.stack(ys, axis=0)).float().to(device),
z=torch.from_numpy(np.stack(zs, axis=0)).float().to(device),
width=size,
height=size,
x_fov=0.7,
y_fov=0.7,
),
)
# Copied from https://github.com/openai/shap-e/blob/8625e7c15526d8510a2292f92165979268d0e945/shap_e/util/notebooks.py#LL64C1-L76C33
@torch.no_grad()
def decode_latent_mesh(
xm: Transmitter | VectorDecoder,
latent: torch.Tensor,
) -> TorchMesh:
decoded = xm.renderer.render_views(
AttrDict(cameras=create_pan_cameras(
2, latent.device)), # lowest resolution possible
params=(xm.encoder if isinstance(xm, Transmitter) else
xm).bottleneck_to_params(latent[None]),
options=AttrDict(rendering_mode='stf', render_with_direction=False),
)
return decoded.raw_meshes[0]
class Model:
def __init__(self):
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.xm = load_model('transmitter', device=self.device)
self.diffusion = diffusion_from_config(load_config('diffusion'))
self.model_text = None
self.model_image = None
def load_model(self, model_name: str) -> None:
assert model_name in ['text300M', 'image300M']
if model_name == 'text300M' and self.model_text is None:
self.model_text = load_model(model_name, device=self.device)
elif model_name == 'image300M' and self.model_image is None:
self.model_image = load_model(model_name, device=self.device)
def to_glb(self, latent: torch.Tensor) -> str:
ply_path = tempfile.NamedTemporaryFile(suffix='.ply',
delete=False,
mode='w+b')
        decode_latent_mesh(self.xm, latent).tri_mesh().write_ply(ply_path)
        ply_path.flush()  # make sure the PLY is fully written before trimesh reads it back
mesh = trimesh.load(ply_path.name)
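        # Reorient axes for glTF export: rotate -90° about x, then 180° about y.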
rot = trimesh.transformations.rotation_matrix(-np.pi / 2, [1, 0, 0])
mesh = mesh.apply_transform(rot)
rot = trimesh.transformations.rotation_matrix(np.pi, [0, 1, 0])
mesh = mesh.apply_transform(rot)
mesh_path = tempfile.NamedTemporaryFile(suffix='.glb', delete=False)
mesh.export(mesh_path.name, file_type='glb')
return mesh_path.name
def run_text(self,
prompt: str,
seed: int = 0,
guidance_scale: float = 15.0,
num_steps: int = 64) -> str:
self.load_model('text300M')
torch.manual_seed(seed)
latents = sample_latents(
batch_size=1,
model=self.model_text,
diffusion=self.diffusion,
guidance_scale=guidance_scale,
model_kwargs=dict(texts=[prompt]),
progress=True,
clip_denoised=True,
use_fp16=True,
use_karras=True,
karras_steps=num_steps,
sigma_min=1e-3,
sigma_max=160,
s_churn=0,
)
return self.to_glb(latents[0])
def run_image(self,
image_path: str,
seed: int = 0,
guidance_scale: float = 3.0,
num_steps: int = 64) -> str:
self.load_model('image300M')
torch.manual_seed(seed)
image = load_image(image_path)
latents = sample_latents(
batch_size=1,
model=self.model_image,
diffusion=self.diffusion,
guidance_scale=guidance_scale,
model_kwargs=dict(images=[image]),
progress=True,
clip_denoised=True,
use_fp16=True,
use_karras=True,
karras_steps=num_steps,
sigma_min=1e-3,
sigma_max=160,
s_churn=0,
)
return self.to_glb(latents[0])
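
For reference, a minimal sketch of driving Model without the UI (hypothetical usage; assumes the shap-e weights can be downloaded):

from model import Model

model = Model()  # loads the transmitter and diffusion config up front
glb_path = model.run_text('A penguin')  # defaults: seed=0, guidance_scale=15.0, num_steps=64
print(glb_path)  # temporary .glb file, viewable in gr.Model3D or any glTF viewer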

requirements.txt Normal file (+6 lines)

@@ -0,0 +1,6 @@
git+https://ghproxy.com/https://github.com/openai/shap-e
gradio==3.30.0
torch==2.0.0
torchvision==0.15.1
trimesh==3.21.5

settings.py Normal file (+7 lines)

@@ -0,0 +1,7 @@
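# CACHE_EXAMPLES: set the CACHE_EXAMPLES env var to '1' to pre-compute gr.Examples outputs.
# MAX_SEED bounds the seed sliders in both tabs.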
import os
import numpy as np
CACHE_EXAMPLES = os.getenv('CACHE_EXAMPLES') == '1'
MAX_SEED = np.iinfo(np.int32).max

style.css Normal file (+14 lines)

@@ -0,0 +1,14 @@
h1 {
text-align: center;
}
#component-0 {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#prompt-container {
gap: 0;
}

utils.py Normal file (+10 lines)

@@ -0,0 +1,10 @@
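# Shared by both tabs: draw a fresh random seed whenever 'Randomize seed' is checked.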
import random
from settings import MAX_SEED
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
if randomize_seed:
seed = random.randint(0, MAX_SEED)
return seed