diff --git a/README.md b/README.md
new file mode 100644
index 0000000..710ab26
--- /dev/null
+++ b/README.md
@@ -0,0 +1,96 @@
+# Wav2Vec2 Acoustic Model fine-tuned on LibriSpeech
+
+The original model can be found at https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.
+
+Paper: https://arxiv.org/abs/2006.11477
+
+## Usage
+
+Make sure you are working on [this branch](https://github.com/huggingface/transformers/tree/add_wav2vec) of transformers (it will hopefully be merged into master soon):
+
+```bash
+$ git checkout add_wav2vec
+```
+
+In the following, we'll show a simple example of how the model can be used for automatic speech recognition.
+
+First, let's load the model:
+
+```python
+from transformers import AutoModelForMaskedLM
+
+model = AutoModelForMaskedLM.from_pretrained("patrickvonplaten/wav2vec2-base-960h")
+```
+
+Next, let's load a dummy LibriSpeech dataset:
+
+```python
+from datasets import load_dataset
+import soundfile as sf
+
+libri_speech_dummy = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
+
+def map_to_array(batch):
+    # read the 16 kHz audio file into a float array
+    speech_array, _ = sf.read(batch["file"])
+    batch["speech"] = speech_array
+    return batch
+
+libri_speech_dummy = libri_speech_dummy.map(map_to_array, remove_columns=["file"])
+
+# check out the dataset
+print(libri_speech_dummy)
+
+input_speech_16kHz = libri_speech_dummy[2]["speech"]
+expected_trans = libri_speech_dummy[2]["text"]
+```
+
+Cool, now we can run an inference pass to retrieve the logits:
+
+```python
+import torch
+
+# add a batch dimension and cast the input to float32
+outputs = model(torch.tensor(input_speech_16kHz, dtype=torch.float32)[None, :])
+
+# take the argmax over the vocabulary and remove the batch dimension
+pred_ids = torch.argmax(outputs[0], dim=-1)[0]
+```
+
+Finally, let's decode the prediction. To do so, let's create a simple CTC decoder:
+
+```python
+import numpy as np
+from itertools import groupby
+
+class Decoder:
+    def __init__(self, json_dict):
+        self.dict = json_dict
+        self.look_up = np.asarray(list(self.dict.keys()))
+
+    def decode(self, ids):
+        converted_tokens = self.look_up[ids]
+        # fuse consecutive duplicates, drop the CTC blank token "<s>",
+        # and turn the word delimiter "|" into a space
+        fused_tokens = [tok[0] for tok in groupby(converted_tokens)]
+        output = ' '.join(''.join(''.join(fused_tokens).split("<s>")).split("|"))
+        return output
+```
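+
+For intuition, here is a minimal, self-contained sketch of what `decode` does, using a hypothetical toy vocabulary (not the model's real one): consecutive duplicate tokens are fused, the CTC blank token `<s>` is dropped, and the word delimiter `|` becomes a space.
+
+```python
+import numpy as np
+from itertools import groupby
+
+# hypothetical toy vocabulary, for illustration only
+toy_look_up = np.asarray(["<s>", "|", "E", "H", "L", "O"])
+toy_ids = [3, 3, 0, 2, 0, 4, 4, 0, 4, 5, 1]
+
+tokens = toy_look_up[toy_ids]                # H H <s> E <s> L L <s> L O |
+fused = [tok[0] for tok in groupby(tokens)]  # H <s> E <s> L <s> L O |
+text = ''.join(''.join(fused).split("<s>"))  # drop the blanks -> "HELLO|"
+print(' '.join(text.split("|")))             # delimiter to space -> "HELLO "
+```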
+
+Now let's instantiate the decoder with the corresponding dict:
+
+```python
+# hard-coded json dict taken from: https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt
+json_dict = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}
+
+decoder = Decoder(json_dict=json_dict)
+```
+
+and decode the result:
+
+```python
+pred_trans = decoder.decode(pred_ids)
+
+print("Prediction:\n", pred_trans)
+print("\n" + 50 * "=" + "\n")
+print("Correct result:\n", expected_trans)
+```
+
+🎉
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..fedbfaf
--- /dev/null
+++ b/config.json
@@ -0,0 +1,51 @@
+{
+  "architectures": [
+    "Wav2Vec2ForMaskedLM"
+  ],
+  "conv_bias": false,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "do_stable_layer_norm": false,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "group",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "model_type": "wav2vec2",
+  "num_attention_heads": 12,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 12,
+  "transformers_version": "4.3.0.dev0",
+  "vocab_size": 32
+}
diff --git a/pytorch_model.bin b/pytorch_model.bin
new file mode 100644
index 0000000..d630db4
--- /dev/null
+++ b/pytorch_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c34f9827b034a1b9141dbf6f652f8a60eda61cdf5771c9e05bfa99033c92cd96
+size 377667514
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000..25bc396
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
\ No newline at end of file
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000..9f2cec8
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false}
\ No newline at end of file
diff --git a/vocab.json b/vocab.json
new file mode 100644
index 0000000..88181b9
--- /dev/null
+++ b/vocab.json
@@ -0,0 +1 @@
+{"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}
\ No newline at end of file