From eed8700e24a0c1c7149bc261aedf6543fb32aa80 Mon Sep 17 00:00:00 2001
From: Niels Rogge <nielsr@users.noreply.huggingface.co>
Date: Tue, 6 Dec 2022 08:17:01 +0000
Subject: [PATCH] fix a typo in code snippet (#2)

- fix a typo in code snippet (3753aeee87a9b3b11c8e4fcff74f0c824a4cafed)
- Update README.md (048d3c90f3f8c5343255801578a89ee6e1caad71)


Co-authored-by: Fatih <fcakyon@users.noreply.huggingface.co>
---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 487c7a2..342ab72 100644
--- a/README.md
+++ b/README.md
@@ -20,16 +20,16 @@ You can use the raw model for video classification into one of the 174 possible
 Here is how to use this model to classify a video:
 
 ```python
-from transformers import TimesformerFeatureExtractor, TimesformerForVideoClassification
+from transformers import AutoImageProcessor, TimesformerForVideoClassification
 import numpy as np
 import torch
 
 video = list(np.random.randn(16, 3, 448, 448))
 
-feature_extractor = TimesformerFeatureExtractor.from_pretrained("facebook/timesformer-hr-finetuned-ssv2")
-model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-hr-finetuned-ssv22")
+processor = AutoImageProcessor.from_pretrained("facebook/timesformer-hr-finetuned-ssv2")
+model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-hr-finetuned-ssv2")
 
-inputs = feature_extractor(video, return_tensors="pt")
+inputs = processor(images=video, return_tensors="pt")
 
 with torch.no_grad():
   outputs = model(**inputs)