add Build-Deploy-Actions

songw 2023-04-25 11:00:10 +08:00
parent 5c8739dd6d
commit dbb9b2d2eb
5 changed files with 152 additions and 0 deletions


@@ -0,0 +1,47 @@
name: Build
run-name: ${{ github.actor }} is upgrading the release 🚀
on: [push]
env:
  REPOSITORY: ${{ github.repository }}
  COMMIT_ID: ${{ github.sha }}
jobs:
  Build-Deploy-Actions:
    runs-on: ubuntu-latest
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
      - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
      - name: Check out repository code
        uses: actions/checkout@v3
      - name: Setup Git LFS
        run: |
          git lfs install
          git lfs fetch
          git lfs checkout
      - name: List files in the repository
        run: |
          ls ${{ github.workspace }}
      - name: Docker Image Info
        id: image-info
        run: |
          echo "::set-output name=image_name::$(echo $REPOSITORY | tr '[:upper:]' '[:lower:]')"
          echo "::set-output name=image_tag::${COMMIT_ID:0:10}"
      - name: Login to container registry
        uses: docker/login-action@v2
        with:
          registry: artifacts.iflytek.com
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build and push
        run: |
          docker version
          docker buildx build -t artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }} . --file ${{ github.workspace }}/Dockerfile --load
          docker push artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
          docker rmi artifacts.iflytek.com/docker-private/atp/${{ steps.image-info.outputs.image_name }}:${{ steps.image-info.outputs.image_tag }}
      - run: echo "🍏 This job's status is ${{ job.status }}."

Dockerfile Normal file

@@ -0,0 +1,13 @@
FROM python:3.7.4-slim
WORKDIR /app
COPY requirements.txt /app
RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple/
RUN pip3 install --trusted-host pypi.python.org -r requirements.txt
COPY . /app
CMD ["python", "app.py"]

app.py Normal file

@@ -0,0 +1,83 @@
from huggingface_hub import hf_hub_download
from transformers import AutoImageProcessor, TableTransformerForObjectDetection
import torch
from PIL import Image
import gradio as gr
from gradio.themes.utils import sizes
import matplotlib.pyplot as plt
import io

theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

# Colors used when drawing the detected bounding boxes.
COLORS = [
    [0.000, 0.447, 0.741],
    [0.850, 0.325, 0.098],
    [0.929, 0.694, 0.125],
    [0.494, 0.184, 0.556],
    [0.466, 0.674, 0.188],
    [0.301, 0.745, 0.933],
]

def fig2img(fig):
    # Render a matplotlib figure into a PIL image.
    buf = io.BytesIO()
    fig.savefig(buf)
    buf.seek(0)
    img = Image.open(buf)
    return img

def visualize_prediction(pil_img, results):
    # Draw the predicted boxes, labels, and scores on top of the input image.
    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    ax = plt.gca()
    colors = COLORS * 100
    for score, (xmin, ymin, xmax, ymax), color, label in zip(
        results["scores"].tolist(), results["boxes"].tolist(), colors, results["labels"]
    ):
        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=color, linewidth=3))
        ax.text(xmin, ymin, f"{model.config.id2label[label.item()]}: {score:0.2f}", fontsize=15, bbox=dict(facecolor="yellow", alpha=0.5))
    plt.axis("off")
    return fig2img(plt.gcf())

image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

def table_detect(image):
    inputs = image_processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    # Convert outputs (bounding boxes and class logits) to the COCO API format.
    target_sizes = torch.tensor([image.size[::-1]])
    results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
    return visualize_prediction(image, results)

with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
    gr.Markdown("""
<div align='center' ><font size='60'>Table Detection</font></div>
""")
    with gr.Row():
        with gr.Column():
            box1 = gr.Image(label="Image", type="pil")
            with gr.Row():
                button = gr.Button("Submit", variant="primary")
        box2 = gr.Image(label="Image")
    button.click(fn=table_detect, inputs=box1, outputs=box2)
    examples = gr.Examples(examples=[['test.png']], inputs=[box1], label="Examples")

if __name__ == "__main__":
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0")
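For a quick check without the web UI, table_detect can be exercised directly on the bundled example image. A rough sketch, assuming the repository root as the working directory and that the model weights can be downloaded:

# Hypothetical headless smoke test: run the detector on test.png and save the rendered result.
python -c "from app import table_detect; from PIL import Image; table_detect(Image.open('test.png')).save('prediction.png')"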

requirements.txt Normal file

@@ -0,0 +1,9 @@
gradio
huggingface_hub
torch
transformers
timm
matplotlib
Pillow
requests
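Outside of Docker, the same dependencies let the app run directly on a host Python, roughly as follows (assuming Python 3.7+, matching the base image above; the first run downloads the microsoft/table-transformer-detection weights):

pip install -r requirements.txt
python app.py    # serves the Gradio UI on 0.0.0.0, default port 7860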

test.png Normal file (binary, 957 KiB; not shown)