diff --git a/README.md b/README.md
index 8f74532..6dfa652 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@
 from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
 import numpy as np
 import torch
-model = Wav2Vec2ForCTC.from_pretrained(r'yongjian/wav2vec2-large-a')
+model = Wav2Vec2ForCTC.from_pretrained(r'yongjian/wav2vec2-large-a') # Note: PyTorch Model
 processor = Wav2Vec2Processor.from_pretrained(r'yongjian/wav2vec2-large-a')
 
 # Load input
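
For context, the line touched by this diff loads a wav2vec2 CTC checkpoint through the Hugging Face transformers API. Below is a minimal sketch of how the loaded `model` and `processor` are typically used for transcription; the random `np_wav` array is a stand-in for real 16 kHz mono audio and is not part of the diff above, so the surrounding README may differ in detail.

```python
import numpy as np
import torch
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained('yongjian/wav2vec2-large-a')  # PyTorch model
processor = Wav2Vec2Processor.from_pretrained('yongjian/wav2vec2-large-a')

# Stand-in input: 1 second of random "audio" at 16 kHz, clipped to [-1, 1]
np_wav = np.random.normal(size=(16000,)).clip(-1, 1)

# Preprocess the raw waveform and run CTC inference
inputs = processor(np_wav, sampling_rate=16000, return_tensors='pt', padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits

# Greedy decode: argmax over the vocabulary, then collapse CTC tokens to text
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print(transcription)
```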