From d72d8ffa1e35afd5b35198fbc84ef5bf488eef7a Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Thu, 31 Mar 2022 19:51:10 +0000
Subject: [PATCH] Update README.md

---
 README.md | 22 +++-------------------
 1 file changed, 3 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index dbd8311..0c4dbf7 100644
--- a/README.md
+++ b/README.md
@@ -48,25 +48,17 @@ To transcribe audio files the model can be used as a standalone acoustic model as follows:
 ```python
  from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
  from datasets import load_dataset
- import soundfile as sf
  import torch
 
  # load model and processor
  processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
  model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
- 
- # define function to read in sound file
- def map_to_array(batch):
-     speech, _ = sf.read(batch["file"])
-     batch["speech"] = speech
-     return batch
 
  # load dummy dataset and read soundfiles
  ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
- ds = ds.map(map_to_array)
 
  # tokenize
- input_values = processor(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
+ input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values # Batch size 1
 
  # retrieve logits
  logits = model(input_values).logits
@@ -83,7 +75,6 @@ To transcribe audio files the model can be used as a standalone acoustic model as follows:
 ```python
 from datasets import load_dataset
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-import soundfile as sf
 import torch
 from jiwer import wer
 
@@ -93,15 +84,8 @@ librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to("cuda")
 processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
 
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
-librispeech_eval = librispeech_eval.map(map_to_array)
-
 def map_to_pred(batch):
-    inputs = processor(batch["speech"], return_tensors="pt", padding="longest")
+    inputs = processor(batch["audio"]["array"], return_tensors="pt", padding="longest")
     input_values = inputs.input_values.to("cuda")
     attention_mask = inputs.attention_mask.to("cuda")
 
@@ -113,7 +97,7 @@ def map_to_pred(batch):
     batch["transcription"] = transcription
     return batch
 
-result = librispeech_eval.map(map_to_pred, batched=True, batch_size=16, remove_columns=["speech"])
+result = librispeech_eval.map(map_to_pred, remove_columns=["audio"])
 
 print("WER:", wer(result["text"], result["transcription"]))
 ```
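For reference, this change relies on the `datasets` `Audio` feature: accessing an `audio` column decodes the file on the fly into a dict with `array` and `sampling_rate` keys, which is why the explicit `soundfile` loading step and the intermediate `speech` column can be dropped. Below is a minimal runnable sketch of the new access pattern, assuming the same dummy dataset and checkpoint named in the diff above; it is an illustration of the pattern, not part of the patch itself.

```python
from datasets import load_dataset
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import torch

# the Audio feature decodes the file on access:
# ds[0]["audio"] -> {"path": ..., "array": <np.ndarray>, "sampling_rate": 16000}
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")

# feed the decoded waveform directly to the processor; no soundfile step needed
input_values = processor(
    sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
).input_values

with torch.no_grad():
    logits = model(input_values).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))
```

Passing `sampling_rate` explicitly lets the processor check that the waveform matches the 16 kHz rate the model was trained on.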