import gradio as gr
import torch

from transformers import AutoProcessor, CLIPSegForImageSegmentation

# Local path (git submodule) holding the CLIPSeg checkpoint.
MODEL_PATH = "clipseg-rd64-refined"

# Fixed text prompts segmented against the uploaded image.
PROMPTS = ["a cat", "a remote", "a blanket"]

# Cached model/processor so weights are loaded from disk only once,
# not on every button click.
_processor = None
_model = None


def _load_model():
    """Lazily load and cache the CLIPSeg processor and model.

    Returns:
        tuple: (AutoProcessor, CLIPSegForImageSegmentation) loaded from
        MODEL_PATH. Subsequent calls return the cached instances.
    """
    global _processor, _model
    if _model is None:
        _processor = AutoProcessor.from_pretrained(MODEL_PATH)
        _model = CLIPSegForImageSegmentation.from_pretrained(MODEL_PATH)
        _model.eval()  # inference-only: disable dropout etc.
    return _processor, _model


def inference(img):
    """Run CLIPSeg segmentation over the fixed PROMPTS for one image.

    Args:
        img: a PIL image supplied by the Gradio ``Image`` component
            (``type="pil"``).

    Returns:
        torch.Size: the shape of the segmentation logits tensor
        (one logit map per prompt), shown in the demo's textbox.
    """
    processor, model = _load_model()

    # One copy of the image per text prompt, as CLIPSeg expects
    # paired (text, image) inputs.
    inputs = processor(
        text=PROMPTS,
        images=[img] * len(PROMPTS),
        padding=True,
        return_tensors="pt",
    )

    # Pure inference: no gradients needed, saves memory and time.
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    print(logits.shape)
    return logits.shape


examples = [["example_cat.jpg"]]

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Semantic segmentation:clipseg-rd64-refined
        Gradio Demo for clipseg-rd64-refined. To use it, simply upload your image, or click one of the examples to load them.
        """
    )
    with gr.Row():
        image_input = gr.Image(type="pil")
        text_output = gr.Textbox()
    image_button = gr.Button("上传")
    image_button.click(inference, inputs=image_input, outputs=text_output)
    gr.Examples(examples, inputs=image_input)

demo.launch()