From 0631208742971f51ae93ee3d8051b0e5f50f828a Mon Sep 17 00:00:00 2001
From: Niels Rogge
Date: Tue, 1 Jun 2021 10:14:31 +0000
Subject: [PATCH] Add code example

---
 README.md | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index e9b2da9..03c68b9 100644
--- a/README.md
+++ b/README.md
@@ -28,16 +28,22 @@ You can use the raw model for object detection. See the [model hub](https://hugg
 Here is how to use this model:
 
 ```python
-from transformers import ViTFeatureExtractor, ViTModel
+from transformers import DetrFeatureExtractor, DetrForObjectDetection
 from PIL import Image
 import requests
+
 url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
 image = Image.open(requests.get(url, stream=True).raw)
-feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
-model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
+
+feature_extractor = DetrFeatureExtractor.from_pretrained('facebook/detr-resnet-50')
+model = DetrForObjectDetection.from_pretrained('facebook/detr-resnet-50')
+
 inputs = feature_extractor(images=image, return_tensors="pt")
 outputs = model(**inputs)
-last_hidden_states = outputs.last_hidden_state
+
+# model predicts bounding boxes and corresponding COCO classes
+logits = outputs.logits
+bboxes = outputs.pred_boxes
 ```
 
 Currently, both the feature extractor and model support PyTorch.
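
For context on what the new example returns, here is a minimal post-processing sketch (not part of the patch itself). It assumes the `outputs`, `model`, and `image` variables from the README snippet above, that `outputs.pred_boxes` holds normalized `(center_x, center_y, width, height)` coordinates with the trailing logit class acting as "no object", and an arbitrary 0.9 confidence threshold:

```python
# Hedged sketch: turn DETR's raw logits/boxes into labelled pixel-space detections.
# Assumes `outputs`, `model`, and `image` from the example above; the 0.9 threshold is arbitrary.
probas = outputs.logits.softmax(-1)[0, :, :-1]     # per-query class probabilities, "no object" dropped
keep = probas.max(-1).values > 0.9                 # keep only confident queries

width, height = image.size
for p, box in zip(probas[keep], outputs.pred_boxes[0, keep]):
    label = model.config.id2label[p.argmax().item()]
    cx, cy, w, h = box.tolist()                    # normalized center/size coordinates
    xmin, ymin = (cx - w / 2) * width, (cy - h / 2) * height
    xmax, ymax = (cx + w / 2) * width, (cy + h / 2) * height
    print(f"{label}: {p.max().item():.2f} at [{xmin:.1f}, {ymin:.1f}, {xmax:.1f}, {ymax:.1f}]")
```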