From 5e3d456ec19c346b612925aea3622e0b8e9f381e Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Tue, 1 Feb 2022 21:32:34 +0000
Subject: [PATCH] Update README.md

---
 README.md | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 455bb40..635d2b1 100644
--- a/README.md
+++ b/README.md
@@ -44,19 +44,12 @@ To transcribe audio files the model can be used as a standalone acoustic model a
 # load model and tokenizer
 processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
-
-# define function to read in sound file
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
 
 # load dummy dataset and read soundfiles
 ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
-ds = ds.map(map_to_array)
 
 # tokenize
-input_values = processor(ds["speech"][:2], return_tensors="pt", padding="longest").input_values # Batch size 1
+input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values # Batch size 1
 
 # retrieve logits
 logits = model(input_values).logits
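
For context, the sketch below shows how the README snippet might read as a complete script after this patch: the manual soundfile/map_to_array loading is dropped in favor of the decoded "audio" column that the datasets library provides, and a single sample is fed to the processor instead of a two-sample batch. The import list, the torch.no_grad() wrapper, and the final argmax/decode step are assumptions not shown in the hunk and are marked as such in the comments.

# Minimal sketch of the post-patch snippet; imports and the decode step are
# assumed, not part of the hunk above.
import torch
from datasets import load_dataset
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

# load model and tokenizer
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# load dummy dataset; the "audio" column already holds the decoded waveform,
# so no map_to_array / soundfile step is needed
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")

# tokenize the raw waveform of the first sample (batch size 1)
input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values

# retrieve logits (no_grad is an assumption for inference-only use)
with torch.no_grad():
    logits = model(input_values).logits

# take argmax and decode to text (assumed final step, as in typical CTC usage)
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print(transcription)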