Compare commits

...

10 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Lysandre | 22aad52d43 | Adding `safetensors` variant of this model (#7); PR commit b593867a0567fe9d5be38a6746b2932feaf52894; co-authored by Nicolas Patry (`Narsil@users.noreply.huggingface.co`) | 2022-11-14 21:37:23 +00:00 |
| patrickvonplaten | 7061117562 | Remove `soundfile` import (#2); PR commit 09b10d0c3aceb1f0cab01de3dc6216e11ab5642e; co-authored by Sanchit Gandhi (`sanchit-gandhi@users.noreply.huggingface.co`) | 2022-06-30 00:05:41 +00:00 |
| Anton Lozhkov | 55bb62306c | Update README.md | 2022-05-24 10:45:46 +00:00 |
| Patrick von Platen | 7ee7fd3d11 | Update README.md | 2022-04-12 09:28:37 +00:00 |
| Patrick von Platen | c0b7a4ab9d | Update README.md | 2022-04-05 16:39:29 +00:00 |
| Patrick von Platen | 5962bda7bb | Update README.md | 2022-03-24 22:47:32 +00:00 |
| Patrick von Platen | 8833830560 | Update README.md | 2022-03-24 19:07:13 +00:00 |
| Patrick von Platen | 5e3d456ec1 | Update README.md | 2022-02-01 21:32:34 +00:00 |
| Mishig Davaadorj | 7717449af4 | Upload README.md | 2021-11-04 14:15:43 +00:00 |
| Patrick von Platen | 6b154c5c31 | Update README.md | 2021-08-27 15:32:42 +00:00 |
3 changed files with 47 additions and 27 deletions

.gitattributes (1 addition)
@@ -15,3 +15,4 @@
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
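For context, commit 22aad52d43 adds the `model.safetensors` weight file that this LFS rule tracks. A minimal sketch of loading it, assuming a `transformers` version that exposes the `use_safetensors` flag on `from_pretrained`:

```python
# Hypothetical usage sketch (not part of this diff): prefer the
# safetensors weights over the pickle-based PyTorch checkpoint.
from transformers import Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained(
    "facebook/wav2vec2-base-960h",
    use_safetensors=True,  # assumes a transformers release supporting this flag
)
```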

README.md

@@ -5,12 +5,44 @@ datasets:
 tags:
 - audio
 - automatic-speech-recognition
+- hf-asr-leaderboard
 license: apache-2.0
 widget:
-- label: Librispeech sample 1
+- example_title: Librispeech sample 1
   src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
-- label: Librispeech sample 2
+- example_title: Librispeech sample 2
   src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
+model-index:
+- name: wav2vec2-base-960h
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: LibriSpeech (clean)
+      type: librispeech_asr
+      config: clean
+      split: test
+      args:
+        language: en
+    metrics:
+    - name: Test WER
+      type: wer
+      value: 3.4
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: LibriSpeech (other)
+      type: librispeech_asr
+      config: other
+      split: test
+      args:
+        language: en
+    metrics:
+    - name: Test WER
+      type: wer
+      value: 8.6
 ---
 
 # Wav2Vec2-Base-960h
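For context on the `model-index` block added above: it is machine-readable model card metadata, so the reported WER figures can be read back programmatically. A minimal sketch, assuming a recent `huggingface_hub` where `ModelCard` parses `model-index` into `eval_results`:

```python
# Hypothetical sketch (not part of this diff): load the card metadata
# and print the evaluation results declared in `model-index`.
from huggingface_hub import ModelCard

card = ModelCard.load("facebook/wav2vec2-base-960h")
for result in card.data.eval_results:  # assumes eval_results is populated from model-index
    print(result.dataset_name, result.metric_type, result.metric_value)
```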
@@ -36,34 +68,26 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
 To transcribe audio files the model can be used as a standalone acoustic model as follows:
 
 ```python
-from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2ForCTC
+from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
 from datasets import load_dataset
-import soundfile as sf
 import torch
 
 # load model and tokenizer
-tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
 
-# define function to read in sound file
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
 # load dummy dataset and read soundfiles
 ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
-ds = ds.map(map_to_array)
 
 # tokenize
-input_values = tokenizer(ds["speech"][:2], return_tensors="pt", padding="longest").input_values  # Batch size 1
+input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values  # Batch size 1
 
 # retrieve logits
 logits = model(input_values).logits
 
 # take argmax and decode
 predicted_ids = torch.argmax(logits, dim=-1)
-transcription = tokenizer.batch_decode(predicted_ids)
+transcription = processor.batch_decode(predicted_ids)
 ```
 
 ## Evaluation
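The removal of `map_to_array` above works because `datasets` now decodes waveforms directly through its `Audio` feature, which is why commit 7061117562 could drop the `soundfile` import. A short sketch of what the new `ds[0]["audio"]` access returns:

```python
# Sketch (not part of this diff): the Audio feature decodes files on access,
# yielding a dict with the raw waveform and its sampling rate.
from datasets import load_dataset

ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]
print(sample["array"].shape)     # 1-D numpy waveform
print(sample["sampling_rate"])   # 16000 Hz for LibriSpeech
```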
@@ -72,8 +96,7 @@ To transcribe audio files the model can be used as a standalone acoustic model a
 ```python
 from datasets import load_dataset
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
-import soundfile as sf
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 import torch
 from jiwer import wer
@@ -81,26 +104,19 @@ from jiwer import wer
 librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
-tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
+processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
 
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
-librispeech_eval = librispeech_eval.map(map_to_array)
-
 def map_to_pred(batch):
-    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
+    input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest").input_values
     with torch.no_grad():
         logits = model(input_values.to("cuda")).logits
 
     predicted_ids = torch.argmax(logits, dim=-1)
-    transcription = tokenizer.batch_decode(predicted_ids)
+    transcription = processor.batch_decode(predicted_ids)
     batch["transcription"] = transcription
     return batch
 
-result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["speech"])
+result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["audio"])
 
 print("WER:", wer(result["text"], result["transcription"]))
 ```
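As a reminder of what the evaluation script measures: `jiwer.wer` computes word error rate, the word-level edit distance divided by the number of reference words; the 3.4/8.6 figures in the `model-index` above express the same quantity as percentages. A toy check, assuming `jiwer` is installed:

```python
# Toy example (not part of this diff): one substitution in a
# three-word reference gives a WER of 1/3.
from jiwer import wer

print(wer("the cat sat", "the cat sit"))  # ~0.333
```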

model.safetensors (binary, stored with Git LFS): new file; binary contents not shown.