diff --git a/config.json b/config.json
index 82b9211..2ab3caf 100644
--- a/config.json
+++ b/config.json
@@ -8,9 +8,11 @@
     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration"
   },
-  "bos_token_id": 150004,
-  "eos_token_id": 150005,
-  "pad_token_id": 20003,
+  "bos_token_id": 130004,
+  "eos_token_id": 130005,
+  "mask_token_id": 130000,
+  "gmask_token_id": 130001,
+  "pad_token_id": 3,
   "hidden_size": 4096,
   "inner_hidden_size": 16384,
   "layernorm_epsilon": 1e-05,
@@ -22,5 +24,5 @@
   "torch_dtype": "float16",
   "transformers_version": "4.23.1",
   "use_cache": true,
-  "vocab_size": 150528
+  "vocab_size": 130528
 }
diff --git a/configuration_chatglm.py b/configuration_chatglm.py
index ac3ea00..78f3425 100644
--- a/configuration_chatglm.py
+++ b/configuration_chatglm.py
@@ -66,6 +66,8 @@ class ChatGLMConfig(PretrainedConfig):
             use_cache=False,
             bos_token_id=150004,
             eos_token_id=150005,
+            mask_token_id=150000,
+            gmask_token_id=150001,
             pad_token_id=0,
             max_sequence_length=2048,
             inner_hidden_size=16384,
@@ -86,6 +88,8 @@ class ChatGLMConfig(PretrainedConfig):
         self.bos_token_id = bos_token_id
         self.eos_token_id = eos_token_id
         self.pad_token_id = pad_token_id
+        self.mask_token_id = mask_token_id
+        self.gmask_token_id = gmask_token_id
         self.position_encoding_2d = position_encoding_2d
         self.quantization_bit = quantization_bit
         self.pre_seq_len = pre_seq_len
diff --git a/modeling_chatglm.py b/modeling_chatglm.py
index 77c3bdd..4887139 100644
--- a/modeling_chatglm.py
+++ b/modeling_chatglm.py
@@ -921,7 +921,7 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         if position_ids is None:
-            MASK, gMASK = 150000, 150001
+            MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
             mask_token = MASK if MASK in input_ids else gMASK
             use_gmask = False if MASK in input_ids else True
@@ -1084,7 +1084,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             **kwargs
     ) -> dict:
         batch_size, seq_length = input_ids.shape
-        MASK, gMASK = 150000, 150001
+        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
         mask_token = MASK if MASK in input_ids else gMASK
         use_gmask = False if MASK in input_ids else True
         seqs = input_ids.tolist()
diff --git a/tokenization_chatglm.py b/tokenization_chatglm.py
index 2808c04..02aa8cd 100644
--- a/tokenization_chatglm.py
+++ b/tokenization_chatglm.py
@@ -48,11 +48,13 @@ class SPTokenizer:
     def __init__(
             self,
             vocab_file,
+            num_image_tokens=20000,
             max_blank_length=80,
             byte_fallback=True,
     ):
         assert vocab_file is not None
         self.vocab_file = vocab_file
+        self.num_image_tokens = num_image_tokens
         self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
         self.max_blank_length = max_blank_length
         self.byte_fallback = byte_fallback
@@ -70,10 +72,6 @@ class SPTokenizer:
     def get_tab_token():
         return f"<|tab|>"

-    @property
-    def num_image_tokens(self):
-        return 20000
-
     @property
     def num_text_tokens(self):
         return self.text_tokenizer.num_tokens
@@ -178,6 +176,7 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
             mask_token='[MASK]',
             gmask_token='[gMASK]',
             padding_side="left",
+            num_image_tokens=20000,
             **kwargs
     ) -> None:
         super().__init__(
@@ -197,7 +196,7 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
         self.mask_token = mask_token
         self.gmask_token = gmask_token

-        self.sp_tokenizer = SPTokenizer(vocab_file)
+        self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)

         """ Initialisation """

diff --git a/tokenizer_config.json b/tokenizer_config.json
index 21c1748..59eab0c 100644
--- a/tokenizer_config.json
+++ b/tokenizer_config.json
@@ -10,6 +10,7 @@
   "remove_space": false,
   "do_lower_case": false,
   "tokenizer_class": "ChatGLMTokenizer",
+  "num_image_tokens": 0,
   "auto_map": {
     "AutoTokenizer": [
       "tokenization_chatglm.ChatGLMTokenizer",
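For context, a minimal sketch (not part of the diff) of how these changes fit together: `ChatGLMTokenizer.__init__` now forwards the `num_image_tokens` value from `tokenizer_config.json` to `SPTokenizer` instead of the removed hard-coded property, and the model reads the mask IDs from the config instead of the literals `150000`/`150001`, keeping them in sync with the new 130k vocabulary. The checkpoint name `THUDM/chatglm-6b` below is an assumption; any repo shipping these files behaves the same way.

```python
# Sketch only; the repo id is assumed, not named in the diff.
from transformers import AutoConfig, AutoTokenizer

repo = "THUDM/chatglm-6b"  # assumed checkpoint id

# trust_remote_code is required because tokenization_chatglm.py and
# modeling_chatglm.py are loaded from the checkpoint repo itself.
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)

# The tokenizer now honors "num_image_tokens" from tokenizer_config.json
# (0 in the updated file) rather than a fixed 20000.
print(tokenizer.sp_tokenizer.num_image_tokens)

# The model picks up the mask ids from config.json (130000 / 130001 in the
# updated file) via config.mask_token_id / config.gmask_token_id.
print(config.mask_token_id, config.gmask_token_id)
```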