Compare commits
No commits in common. "0368c75d052222a1188f1fd5c5b97f4064e58567" and "6d4dae8314d7360c4a4212b4b7d01ed237a836f1" have entirely different histories.
0368c75d05 ... 6d4dae8314
.gitattributes
@@ -6,4 +6,3 @@
*.tar.gz filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
README.md (62 lines deleted)
@@ -1,62 +0,0 @@
---
language:
- multilingual

datasets:
- squad
- arcd
- xquad
---

# Multilingual BERT fine-tuned on SQuADv1.1

[**WandB run link**](https://wandb.ai/salti/mBERT_QA/runs/wkqzhrp2)

**GPU**: Tesla P100-PCIE-16GB

## Training Arguments

```python
max_seq_length = 512
doc_stride = 256
max_answer_length = 64
batch_size = 16
gradient_accumulation_steps = 2
learning_rate = 5e-5
weight_decay = 3e-7
num_train_epochs = 3
warmup_ratio = 0.1
fp16 = True
fp16_opt_level = "O1"
seed = 0
```
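As a minimal sketch (not part of the original card), these values could be mapped onto `transformers.TrainingArguments` roughly as follows; the output directory is a placeholder, and `max_seq_length`, `doc_stride`, and `max_answer_length` belong to the SQuAD preprocessing/postprocessing step rather than to `TrainingArguments`.

```python
from transformers import TrainingArguments

# Hedged sketch: one plausible mapping of the hyperparameters above.
# max_seq_length, doc_stride, and max_answer_length are consumed by the
# dataset preprocessing / answer postprocessing code, not by TrainingArguments.
training_args = TrainingArguments(
    output_dir="mbert-qa",            # placeholder path
    per_device_train_batch_size=16,   # batch_size
    gradient_accumulation_steps=2,
    learning_rate=5e-5,
    weight_decay=3e-7,
    num_train_epochs=3,
    warmup_ratio=0.1,
    fp16=True,
    fp16_opt_level="O1",
    seed=0,
)
```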
## Results

| EM | F1 |
| :----: | :----: |
| 81.731 | 89.009 |

## Zero-shot performance

### on ARCD

| EM | F1 |
| :----: | :----: |
| 20.655 | 48.051 |

### on XQuAD

| Language | EM | F1 |
| :--------: | :----: | :----: |
| Arabic | 42.185 | 57.803 |
| English | 73.529 | 85.01 |
| German | 55.882 | 72.555 |
| Greek | 45.21 | 62.207 |
| Spanish | 58.067 | 76.406 |
| Hindi | 40.588 | 55.29 |
| Russian | 55.126 | 71.617 |
| Thai | 26.891 | 39.965 |
| Turkish | 34.874 | 51.138 |
| Vietnamese | 47.983 | 68.125 |
| Chinese | 47.395 | 58.928 |
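A minimal usage sketch, not part of the original card: the fine-tuned checkpoint can be queried through the `question-answering` pipeline; the local path and the example question/context are placeholders.

```python
from transformers import pipeline

# Sketch: extractive QA with the fine-tuned checkpoint. The path is a
# placeholder for a local clone of this repository (or its Hub id).
qa = pipeline("question-answering", model="./", tokenizer="./")
answer = qa(
    question="What was the model fine-tuned on?",
    context="This multilingual BERT model was fine-tuned on SQuADv1.1 "
            "and evaluated zero-shot on ARCD and XQuAD.",
)
print(answer["answer"], answer["score"])
```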
config.json
@@ -1,11 +1,9 @@
{
  "_name_or_path": "bert-base-multilingual-cased",
  "architectures": [
    "BertForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.1,
  "directionality": "bidi",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
@@ -22,9 +20,6 @@
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "transformers_version": "4.4.0.dev0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 119547
}
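As an illustrative sketch (not part of the diff), the config shown above fully determines the QA architecture and can be loaded directly; the file name assumes a local copy of this config.json.

```python
from transformers import BertConfig, BertForQuestionAnswering

# Sketch: rebuild the (randomly initialized) QA model from the config above.
config = BertConfig.from_json_file("config.json")  # assumed local path
model = BertForQuestionAnswering(config)
print(config.architectures)  # ['BertForQuestionAnswering']
print(config.vocab_size)     # 119547 (multilingual cased vocabulary)
```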
BIN  flax_model.msgpack (Stored with Git LFS): Binary file not shown.
BIN  pytorch_model.bin (Stored with Git LFS): Binary file not shown.
BIN  tf_model.h5 (Stored with Git LFS): Binary file not shown.
tokenizer_config.json
@@ -1 +1 @@
- {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "name_or_path": "bert-base-multilingual-cased"}
+ {"do_lower_case": false, "model_max_length": 512, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
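A small sketch, not part of the diff, of how the remaining tokenizer_config.json fields surface on a loaded tokenizer; the local path is an assumption.

```python
from transformers import BertTokenizerFast

# Sketch: load the tokenizer from this repository's files (path is a placeholder).
tok = BertTokenizerFast.from_pretrained("./")
print(tok.do_lower_case)     # False: the cased multilingual vocab keeps casing
print(tok.model_max_length)  # 512, matching max_seq_length used in training
```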
BIN  training_args.bin (Stored with Git LFS): Binary file not shown.