adding model and tokenizer files

BDoyen 2021-03-24 16:59:02 +00:00
parent 21dc65acf6
commit 59ae9b0aac
5 changed files with 45 additions and 0 deletions

config.json (new file, +37 lines)

@@ -0,0 +1,37 @@
{
"_name_or_path": "./camembert-base-xnli",
"architectures": [
"CamembertForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 5,
"eos_token_id": 6,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "entailment",
"1": "neutral",
"2": "contradiction"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"entailment": 0,
"neutral": 1,
"contradiction": 2
},
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "camembert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"output_past": true,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"transformers_version": "4.4.2",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 32005
}
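
The configuration above wires a CamembertForSequenceClassification head to the three XNLI labels (0 = entailment, 1 = neutral, 2 = contradiction). Below is a minimal sketch of loading the files from this commit with transformers and scoring a premise/hypothesis pair; the local path and the example sentences are placeholders, not part of the commit.

# Minimal sketch: load the model and tokenizer added in this commit and score a
# premise/hypothesis pair. The local path is a placeholder for wherever this
# repository is cloned; requires torch, transformers (>= 4.4) and sentencepiece.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_dir = "./camembert-base-xnli"  # hypothetical local checkout of this repo

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
model.eval()

premise = "Le camembert est un fromage français."  # example premise
hypothesis = "Ce texte parle de fromage."          # example hypothesis

inputs = tokenizer(premise, hypothesis, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# id2label from config.json: 0 -> entailment, 1 -> neutral, 2 -> contradiction
probs = logits.softmax(dim=-1)[0]
for idx, p in enumerate(probs.tolist()):
    print(model.config.id2label[idx], round(p, 3))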

pytorch_model.bin (new binary file, stored with Git LFS; binary content not shown)

sentencepiece.bpe.model (new binary file, stored with Git LFS; binary content not shown)
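
sentencepiece.bpe.model is the SentencePiece model that CamembertTokenizer loads under the hood. A small sketch of inspecting it directly with the sentencepiece library follows, assuming the file from this commit sits in the current directory; note that the raw piece count can differ from the config's vocab_size of 32005, because the tokenizer adds its special tokens on top of the SentencePiece vocabulary.

# Inspect the raw SentencePiece model shipped in this commit.
# Requires `pip install sentencepiece`; the file path assumes a local checkout.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")

print(sp.get_piece_size())                                      # raw subword vocabulary size
print(sp.encode("Le camembert est délicieux.", out_type=str))   # subword pieces for a sample sentence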

special_tokens_map.json (new file, +1 line)

@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"]}

tokenizer_config.json (new file, +1 line)

@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"], "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "camembert-base"}