diff --git a/README.md b/README.md
index 9a31ab8..75331f3 100644
--- a/README.md
+++ b/README.md
@@ -81,7 +81,6 @@ To transcribe audio files the model can be used as a standalone acoustic model a
 ```python
 from datasets import load_dataset
 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
-import soundfile as sf
 import torch
 from jiwer import wer
 
@@ -91,15 +90,8 @@
 librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
 
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
 processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
-librispeech_eval = librispeech_eval.map(map_to_array)
-
 def map_to_pred(batch):
-    input_values = processor(batch["speech"], return_tensors="pt", padding="longest").input_values
+    input_values = processor(batch["audio"]["array"], return_tensors="pt", padding="longest").input_values
     with torch.no_grad():
         logits = model(input_values.to("cuda")).logits
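For context, the change swaps the manual `soundfile` decoding step for the `datasets` library's built-in `Audio` column, which decodes each file on access, so the `map_to_array` pass over the dataset is no longer needed. A minimal sketch of that access pattern (separate from the diff above; the `sample`/`waveform` names are illustrative):

```python
from datasets import load_dataset

# LibriSpeech's "audio" column is an Audio feature: indexing a row decodes
# the underlying file and yields a dict holding the raw waveform and its
# sampling rate, replacing the removed soundfile-based map step.
librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

sample = librispeech_eval[0]
waveform = sample["audio"]["array"]               # 1-D float numpy array
sampling_rate = sample["audio"]["sampling_rate"]  # 16000 for LibriSpeech
print(waveform.shape, sampling_rate)
```

If the audio might not already be at the model's expected 16 kHz, passing `sampling_rate=sample["audio"]["sampling_rate"]` to the processor lets it flag a mismatch instead of silently producing features at the wrong rate.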