add files

patil-suraj 2020-12-21 10:03:18 +00:00
parent abf820df3b
commit 3800f6725a
8 changed files with 7830 additions and 0 deletions

README.md · Normal file · +25 lines

@@ -0,0 +1,25 @@
---
language:
- en
thumbnail:
tags:
- convAI
- conversational
- facebook
license: apache-2.0
datasets:
- blended_skill_talk
metrics:
- perplexity
---
## Model description
+ Paper: [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637)
+ [Original ParlAI Code](https://parl.ai/projects/recipes/)
### Abstract
Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, both asking and answering questions, and displaying knowledge, empathy and personality appropriately, depending on the situation. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter neural models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.
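
### Example usage

A minimal usage sketch with 🤗 Transformers, assuming the checkpoint is published on the Hub under the `facebook/blenderbot-1B-distill` name referenced in `tokenizer_config.json` below; treat the Hub id and the sample utterance as illustrative:

```python
# Usage sketch; the Hub id "facebook/blenderbot-1B-distill" is an assumption.
from transformers import BlenderbotForConditionalGeneration, BlenderbotTokenizer

name = "facebook/blenderbot-1B-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(name)
model = BlenderbotForConditionalGeneration.from_pretrained(name)

utterance = "Hello, how are you doing today?"
inputs = tokenizer([utterance], return_tensors="pt")
# Beam-search settings (num_beams, min/max length, ...) default to the values in config.json.
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))
```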

added_tokens.json · Normal file · +1 line

@@ -0,0 +1 @@
{"<mask>": 8008}

config.json · Normal file · +57 lines

@@ -0,0 +1,57 @@
{
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "add_bias_logits": false,
  "add_final_layer_norm": true,
  "architectures": [
    "BlenderbotForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classif_dropout": 0.0,
  "classifier_dropout": 0.0,
  "d_model": 1280,
  "decoder_attention_heads": 32,
  "decoder_ffn_dim": 5120,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 12,
  "do_blenderbot_90_layernorm": true,
  "dropout": 0.1,
  "encoder_attention_heads": 32,
  "encoder_ffn_dim": 5120,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 2,
  "eos_token_id": 2,
  "extra_layer_norm": false,
  "extra_pos_embeddings": 0,
  "force_bos_token_to_be_generated": false,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "layernorm_variant": "prelayernorm",
  "length_penalty": 0.65,
  "max_length": 60,
  "max_position_embeddings": 128,
  "min_length": 20,
  "model_type": "bart",
  "no_repeat_ngram_size": 3,
  "normalize_before": true,
  "normalize_embedding": false,
  "num_beams": 10,
  "num_hidden_layers": 2,
  "pad_token_id": 0,
  "scale_embedding": true,
  "static_position_embeddings": false,
  "unk_token_id": 3,
  "use_cache": true,
  "vocab_size": 8008
}
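
The generation-related fields above (`num_beams`, `min_length`, `max_length`, `length_penalty`, `no_repeat_ngram_size`) are the defaults that `generate()` falls back to when no overrides are passed. A small inspection sketch, again assuming the `facebook/blenderbot-1B-distill` Hub id:

```python
# Inspection sketch; AutoConfig simply parses the config.json shown above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("facebook/blenderbot-1B-distill")
print(config.num_beams, config.min_length, config.max_length)  # expected: 10 20 60
print(config.length_penalty, config.no_repeat_ngram_size)      # expected: 0.65 3

# Any of these can be overridden per call, e.g.:
# model.generate(**inputs, num_beams=5, max_length=40)
```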

merges.txt · Normal file · +7741 lines

File diff suppressed because it is too large.

pytorch_model.bin · Normal file · binary, stored with Git LFS

Binary file not shown.

special_tokens_map.json · Normal file · +1 line

@@ -0,0 +1 @@
{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}

tokenizer_config.json · Normal file · +1 line

@@ -0,0 +1 @@
{"errors": "replace", "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": "true", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 128, "name_or_path": "blenderbot-1B-distill", "special_tokens_map_file": "blenderbot-1B-distill/special_tokens_map.json", "tokenizer_file": null}

vocab.json · Normal file · +1 line

File diff suppressed because one or more lines are too long