Compare commits


No commits in common. "main" and "dev" have entirely different histories.
main...dev

6 changed files with 37 additions and 67 deletions

README.md

@@ -9,7 +9,7 @@ tags:
 ---
 # ChatGLM-6B-INT4
 <p align="center">
-    👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1udqapmrr-ocT1DS_mxWe6dDY8ahRWzg" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md" target="_blank">WeChat</a>
+    👋 Join our <a href="https://join.slack.com/t/chatglm/shared_invite/zt-1t4a8evfn-vduo2hhNcYqBUnZ71IXiqQ" target="_blank">Slack</a> and <a href="https://github.com/THUDM/ChatGLM-6B/blob/main/resources/WECHAT.md" target="_blank">WeChat</a>
 </p>

 ## 介绍

config.json

@@ -10,16 +10,16 @@
   },
   "bos_token_id": 130004,
   "eos_token_id": 130005,
-  "mask_token_id": 130000,
   "gmask_token_id": 130001,
-  "pad_token_id": 3,
   "hidden_size": 4096,
   "inner_hidden_size": 16384,
   "layernorm_epsilon": 1e-05,
+  "mask_token_id": 130000,
   "max_sequence_length": 2048,
   "model_type": "chatglm",
   "num_attention_heads": 32,
   "num_layers": 28,
+  "pad_token_id": 3,
   "position_encoding_2d": true,
   "quantization_bit": 4,
   "quantization_embeddings": false,

modeling_chatglm.py

@@ -918,7 +918,7 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         elif input_ids is not None:
             batch_size, seq_length = input_ids.shape[:2]
         elif inputs_embeds is not None:
-            batch_size, seq_length = inputs_embeds.shape[:2]
+            batch_size, seq_length, _ = inputs_embeds.shape[:2]
         else:
             raise ValueError("You have to specify either input_ids or inputs_embeds")

@@ -972,8 +972,9 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
         if attention_mask is None:
             attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
         else:
-            attention_mask = attention_mask.to(hidden_states.device)
+            attention_mask = attention_mask.to(input_ids.device)

         for i, layer in enumerate(self.layers):
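Worth flagging in the first hunk: inputs_embeds.shape[:2] always yields exactly two values, so the three-target unpack on the dev side raises at runtime for a [batch, seq_len, hidden] tensor (dropping the [:2] slice would make it valid). A minimal repro, with illustrative shapes:

    import torch

    inputs_embeds = torch.zeros(2, 8, 4096)  # [batch, seq_len, hidden]

    batch_size, seq_length = inputs_embeds.shape[:2]  # main side: fine

    try:
        batch_size, seq_length, _ = inputs_embeds.shape[:2]  # dev side
    except ValueError as e:
        print(e)  # not enough values to unpack (expected 3, got 2)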

(Git LFS pointer file)

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:245786435bde9f4593c105ea846fa461fe42bc63c12b738d0272fcaed6276645
+oid sha256:35828b49cf23cbae4c27788d4b04fc68c79a276300e09f14d72a49b0b738b4a9
 size 3893083075
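The two pointers differ only in their sha256; the blob's content changed while its byte size stayed identical. A Git LFS oid is the plain sha256 digest of the file contents, so a local download can be checked against the pointer like this (the filename is hypothetical):

    import hashlib

    def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
        """Compute the sha256 digest that Git LFS records as the pointer's oid."""
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for block in iter(lambda: f.read(chunk_size), b""):
                digest.update(block)
        return digest.hexdigest()

    # Hypothetical local filename; compare against the dev-side pointer's oid.
    # assert lfs_oid("pytorch_model.bin") == "35828b49cf23cbae4c27788d4b04fc68c79a276300e09f14d72a49b0b738b4a9"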

File diff suppressed because one or more lines are too long

tokenization_chatglm.py

@@ -31,9 +31,6 @@ class TextTokenizer:
     def tokenize(self, text):
         return self.sp.EncodeAsPieces(text)

-    def convert_tokens_to_string(self, tokens):
-        return self.sp.DecodePieces(tokens)
-
     def convert_tokens_to_ids(self, tokens):
         return [self.sp.PieceToId(token) for token in tokens]

@@ -114,23 +111,14 @@ class SPTokenizer:
         tokens = [x + self.num_image_tokens for x in tmp]
         return tokens if add_dummy_prefix else tokens[2:]

-    def postprocess(self, text):
-        text = text.replace("<n>", "\n")
-        text = text.replace(SPTokenizer.get_tab_token(), "\t")
-        for i in range(2, self.max_blank_length + 1):
-            text = text.replace(self.get_blank_token(i), " " * i)
-        return text
-
     def decode(self, text_ids: List[int]) -> str:
         ids = [int(_id) - self.num_image_tokens for _id in text_ids]
         ids = [_id for _id in ids if _id >= 0]
         text = self._get_text_tokenizer().decode(ids)
-        text = self.postprocess(text)
-        return text
-
-    def decode_tokens(self, tokens: List[str]) -> str:
-        text = self._get_text_tokenizer().convert_tokens_to_string(tokens)
-        text = self.postprocess(text)
+        text = text.replace("<n>", "\n")
+        text = text.replace(SPTokenizer.get_tab_token(), "\t")
+        for i in range(2, self.max_blank_length + 1):
+            text = text.replace(self.get_blank_token(i), " " * i)
         return text

     def tokenize(

@@ -268,12 +256,11 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
         return seq

-    def convert_tokens_to_string(self, tokens: List[str]) -> str:
-        return self.sp_tokenizer.decode_tokens(tokens)
-
     def _decode(
         self,
         token_ids: Union[int, List[int]],
-        skip_special_tokens: bool = False,
-        clean_up_tokenization_spaces: bool = True,
         **kwargs
     ) -> str:
         if isinstance(token_ids, int):

@@ -282,7 +269,7 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
             return ""
         if self.pad_token_id in token_ids:  # remove pad
             token_ids = list(filter((self.pad_token_id).__ne__, token_ids))
-        return super()._decode(token_ids, **kwargs)
+        return self.sp_tokenizer.decode(token_ids)

     def _convert_token_to_id(self, token):
         """ Converts a token (str) in an id using the vocab. """