diff --git a/README.md b/README.md
index e6e9af0..c0a24e1 100644
--- a/README.md
+++ b/README.md
@@ -36,13 +36,13 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
 To transcribe audio files the model can be used as a standalone acoustic model as follows:
 
 ```python
-from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
+from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2ForCTC
 from datasets import load_dataset
 import soundfile as sf
 import torch
 
 # load model and tokenizer
-tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
+tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
 
 # define function to read in sound file
@@ -81,7 +81,7 @@ from jiwer import wer
 librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
-tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
+tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
 
 def map_to_array(batch):
     speech, _ = sf.read(batch["file"])
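
For context beyond what the first hunk shows, here is a minimal end-to-end sketch of the transcription flow after this rename. Note that `Wav2Vec2CTCTokenizer` covers only the text side (CTC decoding of predicted token ids), so the sketch pairs it with a `Wav2Vec2FeatureExtractor` for the raw audio input, a step the diff does not touch; the file name `sample.flac` is a placeholder.

```python
from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC
import soundfile as sf
import torch

# the CTC tokenizer decodes predicted ids to text; the feature extractor
# normalizes and batches the raw waveform for the model
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# read an audio file (the model expects 16 kHz mono audio);
# "sample.flac" is a placeholder path
speech, sampling_rate = sf.read("sample.flac")

# prepare model inputs from the raw waveform
input_values = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt").input_values

# take the argmax over the vocabulary at each frame and CTC-decode to text
with torch.no_grad():
    logits = model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)
```

In current `transformers` versions these two halves are usually bundled in a single `Wav2Vec2Processor`, which exposes the same `from_pretrained` and `batch_decode` calls.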
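
Similarly, a sketch of the LibriSpeech WER evaluation that the second hunk touches, filling in the steps the diff truncates. The mapping functions follow the shape of the visible context; the feature-extractor pairing is the same assumption as above, and the batching parameters are illustrative.

```python
from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC
from datasets import load_dataset
from jiwer import wer
import soundfile as sf
import torch

librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# load each example's waveform from disk, as in the truncated context
def map_to_array(batch):
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

librispeech_eval = librispeech_eval.map(map_to_array)

# run the model and CTC-decode the argmax ids to a transcription
def map_to_pred(batch):
    input_values = feature_extractor(batch["speech"], sampling_rate=16_000,
                                     return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    batch["transcription"] = tokenizer.batch_decode(predicted_ids)
    return batch

result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1)
print("WER:", wer(result["text"], result["transcription"]))
```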