From cdaa4f1cc221ac6fbc0e42254c5ad13a4a15891c Mon Sep 17 00:00:00 2001
From: Niels Rogge
Date: Mon, 27 Feb 2023 15:08:57 +0000
Subject: [PATCH] Update README.md

---
 README.md | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 185fd38..6f34200 100644
--- a/README.md
+++ b/README.md
@@ -34,14 +34,15 @@ fine-tuned versions on a task that interests you.
 Here is how to use this model:
 
 ```python
-from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation
+from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
 from PIL import Image
 import requests
 
 url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
-feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-ade")
-inputs = feature_extractor(images=image, return_tensors="pt")
+
+processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-large-ade")
+inputs = processor(images=image, return_tensors="pt")
 
 model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-large-ade")
 outputs = model(**inputs)
@@ -50,9 +51,9 @@ outputs = model(**inputs)
 class_queries_logits = outputs.class_queries_logits
 masks_queries_logits = outputs.masks_queries_logits
 
-# you can pass them to feature_extractor for postprocessing
+# you can pass them to processor for postprocessing
 # we refer to the demo notebooks for visualization (see "Resources" section in the MaskFormer docs)
-predicted_semantic_map = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
 ```
 
 For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/maskformer).
\ No newline at end of file
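
As a follow-up to the usage snippet touched by this patch, here is a minimal visualization sketch, assuming `predicted_semantic_map` is the `(height, width)` tensor of per-pixel class ids returned by `post_process_semantic_segmentation`; the random palette and output file name are illustrative assumptions, not part of the model card or the diff.

```python
import numpy as np
from PIL import Image

# predicted_semantic_map: (height, width) tensor of per-pixel class ids
# produced by post_process_semantic_segmentation above.
segmentation = predicted_semantic_map.cpu().numpy().astype(np.uint8)

# Illustrative palette: map each class id to a fixed pseudo-random RGB color.
rng = np.random.default_rng(seed=0)
palette = rng.integers(0, 256, size=(256, 3), dtype=np.uint8)

color_map = Image.fromarray(palette[segmentation])  # (height, width, 3) RGB image
overlay = Image.blend(image.convert("RGB"), color_map, alpha=0.5)
overlay.save("semantic_map.png")
```

Because `target_sizes=[image.size[::-1]]` resizes the predicted map back to the original resolution, the colored map lines up with the input image when blended.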