Update README.md

parent 41c3322bb0
commit 9a22524020

README.md | 45

@@ -58,4 +58,47 @@ To transcribe audio files the model can be used as a standalone acoustic model a

# take argmax and decode
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)
```
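
The hunk above only shows the tail of the standalone transcription snippet (argmax and decode). For context, a self-contained sketch of such a pipeline is below; the file name `sample.flac` and the single-file usage are illustrative assumptions, not lines from the card itself:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer

# load the fine-tuned model and its tokenizer
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

# read a 16 kHz audio file (hypothetical example file)
speech, sample_rate = sf.read("sample.flac")

# tokenize the raw waveform and run a forward pass
input_values = tokenizer(speech, return_tensors="pt").input_values
logits = model(input_values).logits

# take argmax and decode
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)
```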

## Evaluation

This code snippet shows how to evaluate **facebook/wav2vec2-large-960h-lv60-self** on LibriSpeech's "clean" and "other" test data.

```python
from datasets import load_dataset
from transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer
import soundfile as sf
import torch
from jiwer import wer


librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to("cuda")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

def map_to_array(batch):
    # read the raw audio into a float array
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

librispeech_eval = librispeech_eval.map(map_to_array)

def map_to_pred(batch):
    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits

    # take argmax over the vocabulary and decode to text
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = tokenizer.batch_decode(predicted_ids)
    batch["transcription"] = transcription
    return batch

result = librispeech_eval.map(map_to_pred, batched=True, batch_size=16, remove_columns=["speech"])

print("WER:", wer(result["text"], result["transcription"]))
```

*Result (WER)*:

| "clean" | "other" |
|---|---|
| 2.2 | 5.2 |
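
The snippet above only loads the "clean" configuration, while the table reports WER on both test sets. Assuming the `librispeech_asr` loading script exposes an "other" configuration with a `test` split (as it does for "clean"), the same pipeline can be rerun to reproduce the second column:

```python
# reuse `model`, `tokenizer`, `map_to_array`, and `map_to_pred` from the snippet above
librispeech_other = load_dataset("librispeech_asr", "other", split="test")
librispeech_other = librispeech_other.map(map_to_array)

result_other = librispeech_other.map(map_to_pred, batched=True, batch_size=16, remove_columns=["speech"])
print("WER (other):", wer(result_other["text"], result_other["transcription"]))
```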