From 18cffdddf3ad11cf76cf0027f7d8efe3c576952d Mon Sep 17 00:00:00 2001
From: Qian Liu
Date: Thu, 10 Mar 2022 05:35:57 +0000
Subject: [PATCH] Upload config.json

---
 config.json | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 config.json

diff --git a/config.json b/config.json
new file mode 100644
index 0000000..9b78755
--- /dev/null
+++ b/config.json
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "tapex-large-finetuned-wtq",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "architectures": [
+    "BartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 4096,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 12,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 4096,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 12,
+  "eos_token_id": 2,
+  "forced_bos_token_id": 0,
+  "forced_eos_token_id": 2,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_length": 1024,
+  "max_position_embeddings": 1024,
+  "model_type": "bart",
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "scale_embedding": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.17.0.dev0",
+  "use_cache": true,
+  "vocab_size": 50265
+}
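
Note: the config above describes a standard BART large encoder-decoder (12 encoder and 12 decoder layers, d_model 1024, 16 attention heads, GELU activations) as used by TAPEX fine-tuned on WikiTableQuestions. Below is a minimal sketch of how such a config.json is consumed by the transformers library; the local path "./" is an assumption, point it at whatever directory holds this file (loading actual pretrained weights would additionally require the model checkpoint from the same repository).

    # Minimal sketch (assumption: config.json is in the current directory).
    from transformers import BartConfig, BartForConditionalGeneration

    config = BartConfig.from_pretrained("./")      # reads ./config.json
    model = BartForConditionalGeneration(config)   # randomly initialized BART with these dimensions

    print(model.config.model_type)   # "bart"
    print(model.config.vocab_size)   # 50265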