add detr-resnet-101-panoptic
parent cc4053b698
commit 0dbe302a4b
Binary file not shown. Size: 314 KiB
Binary file not shown. Size: 183 KiB
Dockerfile
@@ -0,0 +1,21 @@
FROM python:3.7.4-slim

WORKDIR /app

COPY requirements.txt /app

RUN pip config set global.index-url https://pypi.mirrors.ustc.edu.cn/simple/

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y git

RUN apt-get install -y tk

RUN python -m pip install git+https://github.com/cocodataset/panopticapi.git

RUN pip3 install --trusted-host pypi.python.org -r requirements.txt

COPY . /app

CMD ["python", "detr_panoptic.py"]
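
# Build/run sketch (assumption: the image tag is arbitrary; port 7003 matches
# the server_port set in detr_panoptic.py):
#   docker build -t detr-panoptic .
#   docker run -p 7003:7003 detr-panoptic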
detr_panoptic.py
@@ -0,0 +1,112 @@
# Panoptic segmentation
from PIL import Image
import io
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as T
import numpy
import gradio as gr
import itertools
import seaborn as sns
from panopticapi.utils import rgb2id


# These are the COCO classes
CLASSES = [
    'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
    'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',
    'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
    'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',
    'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
    'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
    'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',
    'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
    'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',
    'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
    'toothbrush'
]

# Detectron2 uses a different numbering scheme, so we build a conversion table
coco2d2 = {}
count = 0
for i, c in enumerate(CLASSES):
    if c != "N/A":
        coco2d2[i] = count
        count += 1
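
# Quick sanity check of the table (these values follow directly from the
# CLASSES list above): 'person' is COCO id 1 and Detectron2 id 0, while
# 'stop sign' is COCO id 13 but Detectron2 id 11, because two 'N/A' slots
# precede it.
assert coco2d2[1] == 0 and coco2d2[13] == 11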


# Convert a Matplotlib figure to a PIL image.
def fig2img(fig):
    buf = io.BytesIO()
    fig.savefig(buf)
    buf.seek(0)
    img = Image.open(buf)
    return img


# Color each panoptic segment and return the visualization as a PIL image.
def visualize_prediction(result):
    palette = itertools.cycle(sns.color_palette())

    # The segmentation is stored in a special-format PNG
    panoptic_seg = Image.open(io.BytesIO(result['png_string']))
    panoptic_seg = numpy.array(panoptic_seg, dtype=numpy.uint8).copy()
    # Recover the integer id of each mask from its RGB encoding
    # (panopticapi packs it as id = R + 256 * G + 256**2 * B)
    panoptic_seg_id = rgb2id(panoptic_seg)

    # Finally we color each mask individually
    panoptic_seg[:, :, :] = 0
    for seg_id in range(panoptic_seg_id.max() + 1):
        panoptic_seg[panoptic_seg_id == seg_id] = numpy.asarray(next(palette)) * 255
    plt.figure(figsize=(15, 15))
    plt.imshow(panoptic_seg)
    plt.axis('off')
    return fig2img(plt.gcf())


# Load DETR-ResNet-101 with the panoptic head from torch hub, together with
# the post-processor that converts raw model outputs into a panoptic map
model, postprocessor = torch.hub.load('facebookresearch/detr',
                                      'detr_resnet101_panoptic',
                                      pretrained=True,
                                      return_postprocessor=True,
                                      num_classes=250)

# Standard DETR preprocessing: resize, then normalize with the ImageNet
# mean/std statistics
transform = T.Compose([
    T.Resize(800),
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
model.eval()
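
# For each image the post-processor returns a dict whose 'png_string' field
# holds the panoptic map as PNG bytes (consumed by visualize_prediction
# above), alongside 'segments_info' metadata that this app does not use.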


def detect_objects(image_input):
    # Nothing to do if no image was provided
    if image_input is None:
        return None
    image = image_input

    # mean-std normalize the input image (batch-size: 1)
    img = transform(image).unsqueeze(0)
    with torch.no_grad():
        out = model(img)

    # the post-processor expects as input the target size of the predictions
    # (which we set here to the image size)
    result = postprocessor(out, torch.as_tensor(img.shape[-2:]).unsqueeze(0))[0]

    # Visualize the prediction
    viz_img = visualize_prediction(result)

    return viz_img
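
# Local smoke-test sketch (assumption: '1.jpg' is one of the example images
# committed next to this file):
#   viz = detect_objects(Image.open('1.jpg').convert('RGB'))
#   viz.save('out.png')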


demo = gr.Interface(detect_objects,
                    inputs=gr.Image(type='pil'),
                    outputs=gr.Image(shape=(650, 650)),
                    title="Panoptic Segmentation",
                    allow_flagging="never",
                    examples=['1.jpg', '2.jpg'])


if __name__ == '__main__':
    demo.queue().launch(server_name="0.0.0.0", server_port=7003, max_threads=40)
requirements.txt
@@ -0,0 +1,11 @@
gradio
huggingface
torch
transformers
seaborn
matplotlib
pillow
requests
torchvision
numpy
scipy
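# Note (version assumption): the app relies on the Gradio 3.x API
# (gr.Image(shape=...), allow_flagging=...); newer major releases drop those
# arguments, so pinning, e.g. gradio<4, may be needed.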