point-e
Build-Deploy-Actions
Details
Build-Deploy-Actions
Details
This commit is contained in:
commit
11731090f4
|
@ -0,0 +1,34 @@
|
|||
*.7z filter=lfs diff=lfs merge=lfs -text
|
||||
*.arrow filter=lfs diff=lfs merge=lfs -text
|
||||
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
||||
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
||||
*.ftz filter=lfs diff=lfs merge=lfs -text
|
||||
*.gz filter=lfs diff=lfs merge=lfs -text
|
||||
*.h5 filter=lfs diff=lfs merge=lfs -text
|
||||
*.joblib filter=lfs diff=lfs merge=lfs -text
|
||||
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
||||
*.model filter=lfs diff=lfs merge=lfs -text
|
||||
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
||||
*.npy filter=lfs diff=lfs merge=lfs -text
|
||||
*.npz filter=lfs diff=lfs merge=lfs -text
|
||||
*.onnx filter=lfs diff=lfs merge=lfs -text
|
||||
*.ot filter=lfs diff=lfs merge=lfs -text
|
||||
*.parquet filter=lfs diff=lfs merge=lfs -text
|
||||
*.pb filter=lfs diff=lfs merge=lfs -text
|
||||
*.pickle filter=lfs diff=lfs merge=lfs -text
|
||||
*.pkl filter=lfs diff=lfs merge=lfs -text
|
||||
*.pt filter=lfs diff=lfs merge=lfs -text
|
||||
*.pth filter=lfs diff=lfs merge=lfs -text
|
||||
*.rar filter=lfs diff=lfs merge=lfs -text
|
||||
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
||||
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
||||
*.tflite filter=lfs diff=lfs merge=lfs -text
|
||||
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||
*.wasm filter=lfs diff=lfs merge=lfs -text
|
||||
*.xz filter=lfs diff=lfs merge=lfs -text
|
||||
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||
*.zst filter=lfs diff=lfs merge=lfs -text
|
||||
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
@ -0,0 +1,47 @@
|
|||
name: Build
run-name: ${{ github.actor }} is upgrade release 🚀
on: [push]
env:
  REPOSITORY: ${{ github.repository }}
  COMMIT_ID: ${{ github.sha }}
jobs:
  Build-Deploy-Actions:
    runs-on: ubuntu-latest
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
      - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
      - name: Check out repository code
        uses: actions/checkout@v3
      - name: Setup Git LFS
        # Pull the LFS-tracked model/weight files that checkout leaves as pointers.
        run: |
          git lfs install
          git lfs fetch
          git lfs checkout
      - name: List files in the repository
        run: |
          ls ${{ github.workspace }}
      - name: Docker Image Info
        id: image-info
        # Image name must be lowercase for the registry; tag is the short commit SHA.
        # NOTE: `::set-output` is deprecated/removed by GitHub Actions — write to
        # $GITHUB_OUTPUT instead (same step-output semantics, supported runners).
        run: |
          echo "image_name=$(echo $REPOSITORY | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT"
          echo "image_tag=${COMMIT_ID:0:10}" >> "$GITHUB_OUTPUT"
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          registry: artifacts.iflytek.com
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build and push
        # Build, push, then remove the local tag to free runner disk space.
        run: |
          docker version
          docker buildx build -t artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }} . --file ${{ github.workspace }}/Dockerfile --load
          docker push artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
          docker rmi artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
      - run: echo "🍏 This job's status is ${{ job.status }}."
|
|
@ -0,0 +1 @@
|
|||
Subproject commit a9b1bf5920416aaeaec965c25dd9e8f98c864f16
|
|
@ -0,0 +1,13 @@
|
|||
#FROM python:3.8.13
# Internal base image (replaces the public python:3.8.13 base commented out above).
FROM artifacts.iflytek.com/docker-private/atp/base_image_for_ailab:0.0.1

WORKDIR /app

# Copy the whole repo (app.py, requirements.txt, point-e/ and CLIP/ submodules).
COPY . /app

# Use the USTC PyPI mirror — presumably the build network cannot reach pypi.org directly.
RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple
RUN pip install -r requirements.txt

# Install the vendored point-e and CLIP submodules from source.
RUN cd point-e && python setup.py install && cd ../CLIP && python setup.py install

CMD ["python", "app.py"]
|
|
@ -0,0 +1,81 @@
|
|||
import gradio as gr
|
||||
import plotly.graph_objects as go
|
||||
|
||||
import torch
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
from point_e.diffusion.configs import DIFFUSION_CONFIGS, diffusion_from_config
|
||||
from point_e.diffusion.sampler import PointCloudSampler
|
||||
from point_e.models.download import load_checkpoint
|
||||
from point_e.models.configs import MODEL_CONFIGS, model_from_config
|
||||
from point_e.util.plotting import plot_point_cloud
|
||||
|
||||
# Select GPU when available; all models and sampling run on this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Base text-to-point-cloud model (40M params, text-vector conditioned).
print('creating base model...')
base_name = 'base40M-textvec'
base_model = model_from_config(MODEL_CONFIGS[base_name], device)
base_model.eval()
base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name])

# Upsampler model: refines the coarse 1024-point cloud to 4096 points.
print('creating upsample model...')
upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)
upsampler_model.eval()
upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])

# Download pretrained weights (cached by point_e's load_checkpoint helper).
print('downloading base checkpoint...')
base_model.load_state_dict(load_checkpoint(base_name, device))

print('downloading upsampler checkpoint...')
upsampler_model.load_state_dict(load_checkpoint('upsample', device))

# Two-stage sampler: base model emits 1024 points, upsampler adds the
# remaining 4096 - 1024. Only the base stage is text-conditioned
# (guidance 3.0); the upsampler runs unconditioned (guidance 0.0).
sampler = PointCloudSampler(
    device=device,
    models=[base_model, upsampler_model],
    diffusions=[base_diffusion, upsampler_diffusion],
    num_points=[1024, 4096 - 1024],
    aux_channels=['R', 'G', 'B'],
    guidance_scale=[3.0, 0.0],
    model_kwargs_key_filter=('texts', ''), # Do not condition the upsampler at all
)
|
||||
|
||||
def inference(prompt):
    """Generate a colored 3-D point cloud from a text prompt.

    Runs the two-stage point-e sampler to completion, keeping only the
    final diffusion output, and renders it as an interactive Plotly
    3-D scatter figure for the gradio Plot component.

    Args:
        prompt: free-form text description of the object to generate.

    Returns:
        plotly.graph_objects.Figure with one Scatter3d trace.
    """
    # The progressive sampler yields intermediate states; only the last
    # yield (the fully denoised cloud) is kept.
    samples = None
    for x in sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(texts=[prompt])):
        samples = x
    # batch_size=1, so the first (only) cloud is ours.
    # Fix: the original called output_to_point_clouds twice in a row and
    # left an unused `colors` tuple; both removed.
    pc = sampler.output_to_point_clouds(samples)[0]
    fig = go.Figure(
        data=[
            go.Scatter3d(
                x=pc.coords[:,0], y=pc.coords[:,1], z=pc.coords[:,2],
                mode='markers',
                marker=dict(
                    size=2,
                    # Per-point RGB from the sampler's auxiliary color channels.
                    color=['rgb({},{},{})'.format(r,g,b) for r,g,b in zip(pc.channels["R"], pc.channels["G"], pc.channels["B"])],
                )
            )
        ],
        layout=dict(
            # Hide axes — only the cloud itself matters.
            scene=dict(
                xaxis=dict(visible=False),
                yaxis=dict(visible=False),
                zaxis=dict(visible=False)
            )
        ),
    )
    return fig
|
||||
|
||||
# Gradio UI: a single text prompt in, an interactive 3-D point-cloud plot out.
demo = gr.Interface(
    fn=inference,
    inputs="text",
    outputs=gr.Plot(),
    examples=[
        ["a red motorcycle"],
        ["a RED pumpkin"],
        ["a yellow rubber duck"]
    ],
)
# Sampling is slow, so serialize requests through a queue; cap backlog at 30.
demo.queue(max_size=30)
# Bind to all interfaces so the app is reachable from outside the container.
demo.launch(server_name = "0.0.0.0")
|
|
@ -0,0 +1 @@
|
|||
Subproject commit fc8a607c08a3ea804cc82bf1ef8628f88a3a5d2f
|
|
@ -0,0 +1,18 @@
|
|||
#git+https://github.com/openai/point-e
plotly
scipy==1.10.1
filelock
Pillow
# torch and tqdm were each listed twice in the original; deduplicated.
torch
fire
humanize
requests
tqdm
matplotlib
scikit-image
numpy
torchvision
ftfy
regex
|
Loading…
Reference in New Issue