Update README.md

parent 63fb1ed2d0
commit d72d8ffa1e

README.md | 22 +++-------------------
1 file changed, 3 insertions(+), 19 deletions(-)
````diff
@@ -48,25 +48,17 @@ To transcribe audio files the model can be used as a standalone acoustic model as follows:
 ```python
 from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
 from datasets import load_dataset
-import soundfile as sf
 import torch
 
 # load model and processor
 processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
 
-# define function to read in sound file
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
 # load dummy dataset and read soundfiles
 ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
-ds = ds.map(map_to_array)
 
 # tokenize
input_values = processor(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
|
input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest").input_values
|
||||||
 
 # retrieve logits
 logits = model(input_values).logits
````
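For convenience, here is the standalone transcription snippet as it should read after this commit, assembled from the hunk above. The trailing CTC decode falls outside the hunk, so those lines (`torch.argmax` over the logits plus `processor.batch_decode`) are reconstructed here as assumed context rather than quoted from the diff:

```python
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from datasets import load_dataset
import torch

# load model and processor
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

# load dummy dataset; the datasets Audio column yields the decoded waveform
# directly, which is why the soundfile import and map_to_array helper were dropped
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")

# tokenize a single example (batch size 1)
input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values

# retrieve logits
logits = model(input_values).logits

# CTC argmax decode (outside the hunk; reconstructed as assumed context)
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print(transcription)
```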
````diff
@@ -83,7 +75,6 @@ To transcribe audio files the model can be used as a standalone acoustic model as follows:
 ```python
 from datasets import load_dataset
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-import soundfile as sf
 import torch
 from jiwer import wer
 
@@ -93,15 +84,8 @@ librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to("cuda")
 processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
 
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
-librispeech_eval = librispeech_eval.map(map_to_array)
-
 def map_to_pred(batch):
inputs = processor(batch["speech"], return_tensors="pt", padding="longest")
|
inputs = processor(batch["audio"]["array"], return_tensors="pt", padding="longest")
|
     input_values = inputs.input_values.to("cuda")
     attention_mask = inputs.attention_mask.to("cuda")
 
@@ -113,7 +97,7 @@ def map_to_pred(batch):
     batch["transcription"] = transcription
     return batch
 
-result = librispeech_eval.map(map_to_pred, batched=True, batch_size=16, remove_columns=["speech"])
+result = librispeech_eval.map(map_to_pred, remove_columns=["audio"])
 
 print("WER:", wer(result["text"], result["transcription"]))
 ```
````
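Likewise, the evaluation snippet assembled from the three hunks above. The body of `map_to_pred` between the hunks (forward pass and decode) is not shown in the diff, so it is sketched here as assumed context; the final `[0]` index is added because `map` is no longer called with `batched=True` after this commit, so each call handles a single example:

```python
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import torch
from jiwer import wer

librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to("cuda")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

def map_to_pred(batch):
    # the datasets Audio feature decodes the file on access, so the old
    # soundfile-based map_to_array helper is no longer needed
    inputs = processor(batch["audio"]["array"], return_tensors="pt", padding="longest")
    input_values = inputs.input_values.to("cuda")
    attention_mask = inputs.attention_mask.to("cuda")

    # forward pass and CTC argmax decode (elided between the hunks above,
    # reconstructed as assumed context)
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)

    # one example per call, so keep the single decoded string
    batch["transcription"] = transcription[0]
    return batch

result = librispeech_eval.map(map_to_pred, remove_columns=["audio"])

print("WER:", wer(result["text"], result["transcription"]))
```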