first commit

Ceyda Cinarel 2021-04-01 01:05:49 +00:00
parent 64fd5fcb92
commit 27503d5d38
7 changed files with 85 additions and 4 deletions

README.md

@@ -23,10 +23,10 @@ model-index:
 metrics:
 - name: Test WER
   type: wer
-  value: 29.30
+  value: 27.08
 ---
-# Wav2Vec2-Base-960-Turkish
+# Wav2Vec2-Base-760-Turkish
 # TBA
 Pretrained Turkish model [ceyda/wav2vec2-base-760](https://huggingface.co/ceyda/wav2vec2-base-760). Fine-tuned on Turkish using the [Common Voice](https://huggingface.co/datasets/common_voice)
@@ -87,7 +87,7 @@ processor = Wav2Vec2Processor.from_pretrained("ceyda/wav2vec2-base-960-turkish")
 model = Wav2Vec2ForCTC.from_pretrained("ceyda/wav2vec2-base-960-turkish")
 model.to("cuda")
-chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\\\”\\'\\`…\\’»«]'
+chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\\”\'\`…\’»«]'
 resampler = torchaudio.transforms.Resample(48_000, 16_000)
 # Preprocessing the datasets.
@@ -117,7 +117,7 @@ result = test_dataset.map(evaluate, batched=True, batch_size=8)
 print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
 ```
-**Test Result**: XX.XX % (TBA)
+**Test Result**: 27.08 % (in progress)
 ## Training
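For context, the code fragments in the diff above come from the model card's evaluation section. Below is a minimal, reconstructed sketch of that flow, assuming the standard Common Voice WER recipe; the `speech_file_to_array_fn` and `evaluate` bodies and the dataset/metric loading lines are filled in from the usual template and are not taken verbatim from this commit.

```python
# Reconstructed sketch of the WER evaluation the README excerpts refer to (not verbatim).
import re
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("ceyda/wav2vec2-base-960-turkish")
model = Wav2Vec2ForCTC.from_pretrained("ceyda/wav2vec2-base-960-turkish")
model.to("cuda")

test_dataset = load_dataset("common_voice", "tr", split="test")
wer = load_metric("wer")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\\”\'\`…\’»«]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing: strip punctuation, lowercase, resample 48 kHz -> 16 kHz.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Greedy CTC decoding on GPU, batched.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"),
                       attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```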

config.json Normal file

@@ -0,0 +1,67 @@
{
"_name_or_path": "./pretrained/checkpoint_0.60",
"activation_dropout": 0.1,
"apply_spec_augment": true,
"architectures": [
"Wav2Vec2ForCTC"
],
"attention_dropout": 0.1,
"bos_token_id": 1,
"conv_bias": false,
"conv_dim": [
512,
512,
512,
512,
512,
512,
512
],
"conv_kernel": [
10,
3,
3,
3,
3,
2,
2
],
"conv_stride": [
5,
2,
2,
2,
2,
2,
2
],
"ctc_loss_reduction": "mean",
"ctc_zero_infinity": true,
"do_stable_layer_norm": false,
"eos_token_id": 2,
"feat_extract_activation": "gelu",
"feat_extract_norm": "group",
"feat_proj_dropout": 0.1,
"final_dropout": 0.1,
"gradient_checkpointing": true,
"hidden_act": "gelu",
"hidden_dropout": 0.05,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-05,
"layerdrop": 0.1,
"mask_feature_length": 10,
"mask_feature_prob": 0.0,
"mask_time_length": 10,
"mask_time_prob": 0.5,
"model_type": "wav2vec2",
"num_attention_heads": 12,
"num_conv_pos_embedding_groups": 16,
"num_conv_pos_embeddings": 128,
"num_feat_extract_layers": 7,
"num_hidden_layers": 12,
"pad_token_id": 36,
"transformers_version": "4.5.0.dev0",
"vocab_size": 37
}
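For readers who want to sanity-check these hyperparameters programmatically, a minimal sketch using `transformers` (the model id is taken from the README above; a local path to this repo works equally well):

```python
# Minimal sketch: load the config above and inspect a few fields.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained("ceyda/wav2vec2-base-960-turkish")
print(config.model_type)         # "wav2vec2"
print(config.vocab_size)         # 37 -> matches vocab.json below
print(config.pad_token_id)       # 36 -> [PAD], used as the CTC blank token
print(config.num_hidden_layers)  # 12 (base-sized encoder)
```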

preprocessor_config.json Normal file

@@ -0,0 +1,8 @@
{
"do_normalize": true,
"feature_size": 1,
"padding_side": "right",
"padding_value": 0.0,
"return_attention_mask": true,
"sampling_rate": 16000
}
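These fields correspond one-to-one to `Wav2Vec2FeatureExtractor` arguments; a minimal sketch of applying them to raw 16 kHz audio (the all-zeros waveform is only a placeholder):

```python
# Minimal sketch: build the feature extractor from the settings above.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,           # zero-mean / unit-variance normalize each utterance
    return_attention_mask=True,  # emit a mask so right-padding can be ignored
)

# One second of placeholder 16 kHz audio; real input would come from torchaudio.
waveform = np.zeros(16_000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np", padding=True)
print(inputs.input_values.shape)    # (1, 16000)
print(inputs.attention_mask.shape)  # (1, 16000)
```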

pytorch_model.bin Normal file

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ecb5a521af5105ca67abd19a841c08e97b071a8b69e186b06b25633c6fc36804
size 377690860
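The weights themselves live in Git LFS; only the pointer above is versioned in Git. After fetching (e.g. `git lfs pull`), the local file can be checked against the pointer's hash and size with a small script like the sketch below (the path assumes the repo root as working directory):

```python
# Minimal sketch: verify a downloaded LFS object against the pointer above.
import hashlib

EXPECTED_SHA256 = "ecb5a521af5105ca67abd19a841c08e97b071a8b69e186b06b25633c6fc36804"
EXPECTED_SIZE = 377690860

h = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert h.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```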

special_tokens_map.json Normal file

@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}

tokenizer_config.json Normal file

@@ -0,0 +1 @@
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}

vocab.json Normal file

@@ -0,0 +1 @@
{"e": 0, "x": 1, "î": 2, "l": 3, "â": 4, "j": 5, "ç": 6, "ş": 7, "g": 8, "ı": 9, "v": 10, "d": 11, "t": 12, "n": 13, "a": 14, "c": 15, "h": 16, "p": 17, "r": 18, "w": 19, "z": 20, "k": 21, "u": 22, "b": 23, "ü": 24, "y": 26, "o": 27, "q": 28, "m": 29, "f": 30, "s": 31, "ö": 32, "ğ": 33, "i": 34, "|": 25, "[UNK]": 35, "[PAD]": 36}