Update README.md
parent 7717449af4 · commit 5e3d456ec1
@@ -44,19 +44,12 @@ To transcribe audio files the model can be used as a standalone acoustic model a
 # load model and tokenizer
 processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
 model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
 
-# define function to read in sound file
-def map_to_array(batch):
-    speech, _ = sf.read(batch["file"])
-    batch["speech"] = speech
-    return batch
-
 # load dummy dataset and read soundfiles
 ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
-ds = ds.map(map_to_array)
 
 # tokenize
-input_values = processor(ds["speech"][:2], return_tensors="pt", padding="longest").input_values  # Batch size 1
+input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values  # Batch size 1
 
 # retrieve logits
 logits = model(input_values).logits
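For context, this is roughly what the updated README snippet looks like end to end. It is a minimal sketch, not the exact file contents: the imports, the `torch.no_grad()` context, and the final greedy CTC decoding step are assumptions added here and do not appear in the diff above. The change itself relies on the `datasets` library exposing a decoded `"audio"` column, which is why the `soundfile`-based `map_to_array` helper is no longer needed.

```python
import torch
from datasets import load_dataset
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

# load model and tokenizer
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# load dummy dataset; the "audio" column is decoded to a float waveform on access
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")

# tokenize the raw waveform of the first sample
input_values = processor(ds[0]["audio"]["array"], return_tensors="pt", padding="longest").input_values  # Batch size 1

# retrieve logits (no_grad is an assumption for inference, not part of the diff)
with torch.no_grad():
    logits = model(input_values).logits

# greedy CTC decoding to text (assumed follow-up step, not shown in the diff)
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
```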