diff --git a/README.md b/README.md
index e0487c9..8ba41af 100644
--- a/README.md
+++ b/README.md
@@ -76,7 +76,7 @@ from jiwer import wer
 librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to("cuda")
-tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h-lv60-self")
+tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
 
 def map_to_array(batch):
     speech, _ = sf.read(batch["file"])
@@ -86,9 +86,12 @@ def map_to_array(batch):
 librispeech_eval = librispeech_eval.map(map_to_array)
 
 def map_to_pred(batch):
-    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
+    inputs = tokenizer(batch["speech"], return_tensors="pt", padding="longest")
+    input_values = inputs.input_values.to("cuda")
+    attention_mask = inputs.attention_mask.to("cuda")
+
     with torch.no_grad():
-        logits = model(input_values.to("cuda")).logits
+        logits = model(input_values, attention_mask=attention_mask).logits
 
     predicted_ids = torch.argmax(logits, dim=-1)
     transcription = tokenizer.batch_decode(predicted_ids)
@@ -104,4 +107,4 @@ print("WER:", wer(result["text"], result["transcription"]))
 
 | "clean" | "other" |
 |---|---|
-| 2.2 | 5.2 |
\ No newline at end of file
+| 1.9 | 3.9 |
\ No newline at end of file
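
For reference, here is a minimal sketch of how the README evaluation snippet reads once this patch is applied: the tokenizer is loaded from the same `facebook/wav2vec2-large-960h-lv60-self` checkpoint as the model, and the padding mask returned by the tokenizer is forwarded to the model. The imports, the bodies of `map_to_array`/`map_to_pred` beyond the diff context (the `batch[...] = ...` / `return batch` lines), and the final batched `map` call with `batch_size=16` are assumptions reconstructed from the surrounding README, not part of this patch.

```python
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
import soundfile as sf
import torch
from jiwer import wer

librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

# Model and tokenizer now come from the same "large" checkpoint
# instead of mixing the large model with the base tokenizer.
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to("cuda")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

def map_to_array(batch):
    # Decode the audio file to a float array (assumed body, as in the README context).
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

librispeech_eval = librispeech_eval.map(map_to_array)

def map_to_pred(batch):
    # Keep the full tokenizer output so the padding mask is available,
    # then move both tensors to the GPU alongside the model.
    inputs = tokenizer(batch["speech"], return_tensors="pt", padding="longest")
    input_values = inputs.input_values.to("cuda")
    attention_mask = inputs.attention_mask.to("cuda")

    with torch.no_grad():
        # Passing attention_mask lets the model ignore padded frames in each batch.
        logits = model(input_values, attention_mask=attention_mask).logits

    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = tokenizer.batch_decode(predicted_ids)
    batch["transcription"] = transcription
    return batch

# Batched mapping over the test split (assumed call, matching the README context).
result = librispeech_eval.map(map_to_pred, batched=True, batch_size=16, remove_columns=["speech"])

print("WER:", wer(result["text"], result["transcription"]))
```

The mask only matters because `padding="longest"` batches utterances of different lengths; supplying it keeps the padded frames out of the forward pass, which is consistent with the lower WER figures in the updated results table.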