From 5f009df2186c5841582ecd419409b56041180144 Mon Sep 17 00:00:00 2001
From: Niels Rogge
Date: Mon, 27 Feb 2023 15:10:23 +0000
Subject: [PATCH] Update README.md

---
 README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 6f7fe07..596d075 100644
--- a/README.md
+++ b/README.md
@@ -34,17 +34,17 @@ fine-tuned versions on a task that interests you.
 Here is how to use this model:
 
 ```python
-from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation
+from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
 from PIL import Image
 import requests
 
 # load MaskFormer fine-tuned on COCO panoptic segmentation
-feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-coco")
+processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-large-coco")
 model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-large-coco")
 
 url = "http://images.cocodataset.org/val2017/000000039769.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
-inputs = feature_extractor(images=image, return_tensors="pt")
+inputs = processor(images=image, return_tensors="pt")
 
 outputs = model(**inputs)
 # model predicts class_queries_logits of shape `(batch_size, num_queries)`
@@ -52,8 +52,8 @@ outputs = model(**inputs)
 class_queries_logits = outputs.class_queries_logits
 masks_queries_logits = outputs.masks_queries_logits
 
-# you can pass them to feature_extractor for postprocessing
-result = feature_extractor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+# you can pass them to processor for postprocessing
+result = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
 # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
 predicted_panoptic_map = result["segmentation"]
 ```
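For context (not part of the patch above): a minimal sketch of how the postprocessed `result` from the updated snippet could be inspected and visualized. It assumes matplotlib is installed and that `model`, `processor`, and `result` were created exactly as in the README code; the plotting and printing choices are illustrative rather than part of the model card.

```python
# Illustrative only -- not part of the patch. Assumes `model` and `result`
# exist as in the README snippet above, and that matplotlib is installed.
import matplotlib.pyplot as plt

# result["segmentation"] is an (H, W) tensor of segment ids;
# result["segments_info"] describes the class behind each segment id
segmentation = result["segmentation"].cpu().numpy()

# print a readable class name for every predicted segment
for segment in result["segments_info"]:
    label = model.config.id2label[segment["label_id"]]
    print(f"segment {segment['id']}: {label} (score {segment['score']:.2f})")

# quick look at the panoptic map: each segment id gets its own color
plt.imshow(segmentation)
plt.axis("off")
plt.show()
```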