# Panoptic segmentation (全景分割): Gradio demo around the DETR panoptic model.
# Standard library
import io
import itertools

# Third-party
import gradio as gr
import matplotlib.pyplot as plt
import numpy
import seaborn as sns
import torch
import torchvision.transforms as T
from gradio.themes.utils import sizes
from panopticapi.utils import rgb2id
from PIL import Image
from transformers import DetrFeatureExtractor, DetrForSegmentation
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
|
|
block_label_text_color = '#4D63FF',
|
|
block_title_text_color = '#4D63FF',
|
|
button_primary_text_color = '#4D63FF',
|
|
button_primary_background_fill='#FFFFFF',
|
|
button_primary_border_color='#4D63FF',
|
|
button_primary_background_fill_hover='#EDEFFF',
|
|
)
|
|
|
|
|
|
# These are the COCO classes
|
|
CLASSES = [
|
|
'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
|
|
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
|
|
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
|
|
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',
|
|
'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
|
|
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
|
|
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',
|
|
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
|
|
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
|
|
'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',
|
|
'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
|
|
'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',
|
|
'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
|
|
'toothbrush'
|
|
]
|
|
|
|
# Detectron2 uses a different numbering scheme, we build a conversion table
|
|
coco2d2 = {}
|
|
count = 0
|
|
for i, c in enumerate(CLASSES):
|
|
if c != "N/A":
|
|
coco2d2[i] = count
|
|
count+=1
|
|
|
|
|
|
# Draw the bounding boxes on image.
|
|
def fig2img(fig):
|
|
buf = io.BytesIO()
|
|
fig.savefig(buf)
|
|
buf.seek(0)
|
|
img = Image.open(buf)
|
|
return img
|
|
|
|
|
|
# Draw the bounding boxes.
|
|
def visualize_prediction(result):
|
|
palette = itertools.cycle(sns.color_palette())
|
|
|
|
# The segmentation is stored in a special-format png
|
|
panoptic_seg = Image.open(io.BytesIO(result['png_string']))
|
|
panoptic_seg = numpy.array(panoptic_seg, dtype=numpy.uint8).copy()
|
|
# We retrieve the ids corresponding to each mask
|
|
panoptic_seg_id = rgb2id(panoptic_seg)
|
|
|
|
# Finally we color each mask individually
|
|
panoptic_seg[:, :, :] = 0
|
|
for id in range(panoptic_seg_id.max() + 1):
|
|
panoptic_seg[panoptic_seg_id == id] = numpy.asarray(next(palette)) * 255
|
|
plt.figure(figsize=(15,15))
|
|
plt.imshow(panoptic_seg)
|
|
plt.axis('off')
|
|
return fig2img(plt.gcf())
|
|
|
|
|
|
feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50-panoptic")
|
|
model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic")
|
|
|
|
|
|
transform = T.Compose([
|
|
T.Resize(800),
|
|
T.ToTensor(),
|
|
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
|
])
|
|
model.eval();
|
|
|
|
|
|
def detect_objects(image_input):
|
|
inputs = feature_extractor(images=image_input, return_tensors="pt")
|
|
outputs = model(**inputs)
|
|
|
|
processed_sizes = torch.as_tensor(inputs["pixel_values"].shape[-2:]).unsqueeze(0)
|
|
result = feature_extractor.post_process_panoptic(outputs, processed_sizes)[0]
|
|
|
|
#Visualize prediction
|
|
viz_img = visualize_prediction(result)
|
|
|
|
return viz_img
|
|
|
|
|
|
with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
|
|
gr.Markdown("""
|
|
<div align='center' ><font size='60'>全景分割</font></div>
|
|
""")
|
|
with gr.Row():
|
|
with gr.Column():
|
|
image = gr.Image(label="图片", type="pil")
|
|
with gr.Row():
|
|
button = gr.Button("提交", variant="primary")
|
|
box2 = gr.Image(label="图片")
|
|
|
|
button.click(fn=detect_objects, inputs=[image], outputs=box2)
|
|
examples = gr.Examples(examples=[['1.jpg'], ['2.jpg']], inputs=[image], label="例子")
|
|
|
|
|
|
if __name__ == '__main__':
|
|
demo.queue().launch(server_name = "0.0.0.0")
|