Update README.md

Patrick von Platen 2021-08-27 15:32:42 +00:00 committed by huggingface-web
parent f1f16470cc
commit 6b154c5c31
1 changed file with 8 additions and 8 deletions


@@ -36,13 +36,13 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
To transcribe audio files, the model can be used as a standalone acoustic model as follows:
```python
-from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2ForCTC
+from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import soundfile as sf
import torch
# load model and tokenizer
-tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
# define function to read in sound file
@@ -56,14 +56,14 @@ To transcribe audio files the model can be used as a standalone acoustic model a
ds = ds.map(map_to_array)
# tokenize
-input_values = tokenizer(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
+input_values = processor(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
# retrieve logits
logits = model(input_values).logits
# take argmax and decode
predicted_ids = torch.argmax(logits, dim=-1)
-transcription = tokenizer.batch_decode(predicted_ids)
+transcription = processor.batch_decode(predicted_ids)
```
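For context, `Wav2Vec2Processor` bundles the feature extractor and the CTC tokenizer behind a single interface, which is why it can stand in for the tokenizer in both snippets. A minimal sketch of that equivalence (a hedged aside, not part of this diff; attribute names follow `transformers`' `ProcessorMixin`):

```python
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

# Calling the processor on raw audio delegates to the feature extractor;
# batch_decode delegates to the tokenizer.
print(type(processor.feature_extractor).__name__)  # Wav2Vec2FeatureExtractor
print(type(processor.tokenizer).__name__)          # Wav2Vec2CTCTokenizer
```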
## Evaluation
@@ -72,7 +72,7 @@ To transcribe audio files the model can be used as a standalone acoustic model a
```python
from datasets import load_dataset
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import soundfile as sf
import torch
from jiwer import wer
@@ -81,7 +81,7 @@ from jiwer import wer
librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
-tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
def map_to_array(batch):
    speech, _ = sf.read(batch["file"])
@@ -91,12 +91,12 @@ def map_to_array(batch):
librispeech_eval = librispeech_eval.map(map_to_array)
def map_to_pred(batch):
-    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
+    input_values = processor(batch["speech"], return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits
    predicted_ids = torch.argmax(logits, dim=-1)
-    transcription = processor.batch_decode(predicted_ids)
+    transcription = processor.batch_decode(predicted_ids)
batch["transcription"] = transcription
return batch
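# The hunk shown above ends here. As a hedged sketch (not part of this
# commit), the evaluation is typically completed by mapping `map_to_pred`
# over the dataset and scoring with jiwer; `batch_size=16` is an assumed
# value, not taken from the diff.
result = librispeech_eval.map(map_to_pred, batched=True, batch_size=16, remove_columns=["speech"])

print("WER:", wer(result["text"], result["transcription"]))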