Slim embedding
This commit is contained in: parent 68873da921 · commit bfb1a8f2b6
config.json (14 lines changed)

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "THUDM/chatglm-6b",
+  "_name_or_path": "THUDM/chatglm-6b-int4",
   "architectures": [
     "ChatGLMModel"
   ],
@@ -8,21 +8,23 @@
     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration"
   },
-  "bos_token_id": 150004,
-  "eos_token_id": 150005,
-  "pad_token_id": 20003,
+  "bos_token_id": 130004,
+  "eos_token_id": 130005,
+  "gmask_token_id": 130001,
   "hidden_size": 4096,
   "inner_hidden_size": 16384,
   "layernorm_epsilon": 1e-05,
+  "mask_token_id": 130000,
   "max_sequence_length": 2048,
   "model_type": "chatglm",
   "num_attention_heads": 32,
   "num_layers": 28,
+  "pad_token_id": 3,
   "position_encoding_2d": true,
   "quantization_bit": 4,
   "quantization_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.23.1",
+  "transformers_version": "4.27.1",
   "use_cache": true,
-  "vocab_size": 150528
+  "vocab_size": 130528
 }
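Note on the numbers: the changes are consistent with dropping the 20,000 unused image tokens from the embedding, so every special-token id above them shifts down by exactly 20,000 (pad is separately remapped to 3). A quick arithmetic check, not part of the repo:

# Sketch: sanity-check the id shift implied by this diff.
# Removing the 20,000 image tokens shifts every later id down by 20,000.
NUM_IMAGE_TOKENS = 20000

old_to_new = {
    150000: 130000,  # [MASK]
    150001: 130001,  # [gMASK]
    150004: 130004,  # bos / <sop>
    150005: 130005,  # eos / <eop>
}
for old_id, new_id in old_to_new.items():
    assert new_id == old_id - NUM_IMAGE_TOKENS

# The vocab shrinks by the same amount; pad is remapped to 3 by hand.
assert 150528 - NUM_IMAGE_TOKENS == 130528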
configuration_chatglm.py

@@ -66,6 +66,8 @@ class ChatGLMConfig(PretrainedConfig):
         use_cache=False,
         bos_token_id=150004,
         eos_token_id=150005,
+        mask_token_id=150000,
+        gmask_token_id=150001,
         pad_token_id=0,
         max_sequence_length=2048,
         inner_hidden_size=16384,
@@ -87,6 +89,8 @@ class ChatGLMConfig(PretrainedConfig):
         self.bos_token_id = bos_token_id
         self.eos_token_id = eos_token_id
         self.pad_token_id = pad_token_id
+        self.mask_token_id = mask_token_id
+        self.gmask_token_id = gmask_token_id
         self.position_encoding_2d = position_encoding_2d
         self.quantization_bit = quantization_bit
         self.quantization_embeddings = quantization_embeddings
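The class defaults above still carry the old 150000-range ids; the slimmed config.json overrides them when a checkpoint is loaded. A minimal sketch of that override path, assuming the repo files are available in the working directory:

# Sketch: config.json values override ChatGLMConfig's baked-in defaults.
from configuration_chatglm import ChatGLMConfig

default_cfg = ChatGLMConfig()                    # class default: mask_token_id=150000
loaded_cfg = ChatGLMConfig.from_pretrained(".")  # reads config.json: 130000

print(default_cfg.mask_token_id, loaded_cfg.mask_token_id)  # 150000 130000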
modeling_chatglm.py

@@ -921,9 +921,9 @@ class ChatGLMModel(ChatGLMPreTrainedModel):
 
         if position_ids is None:
-            MASK, gMASK = 150000, 150001
-            mask_token = MASK if MASK in input_ids else gMASK
-            use_gmask = False if MASK in input_ids else True
+            MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
+            mask_token = gMASK if gMASK in input_ids else MASK
+            use_gmask = True if gMASK in input_ids else False
 
             mask_positions = [seq.tolist().index(mask_token) for seq in input_ids]
             position_ids = self.get_position_ids(
@@ -1084,9 +1084,9 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             **kwargs
     ) -> dict:
         batch_size, seq_length = input_ids.shape
-        MASK, gMASK = 150000, 150001
-        mask_token = MASK if MASK in input_ids else gMASK
-        use_gmask = False if MASK in input_ids else True
+        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
+        mask_token = gMASK if gMASK in input_ids else MASK
+        use_gmask = True if gMASK in input_ids else False
         seqs = input_ids.tolist()
         mask_positions = [seq.index(mask_token) for seq in seqs]
 
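Why this matters for the slimmed vocab: the hard-coded 150000/150001 can never occur in a 130528-token vocabulary, so seq.index(mask_token) would raise ValueError; reading the ids from the config (and preferring gMASK when both appear) fixes that. A standalone sketch of the new selection branch, using the ids from the updated config.json:

# Sketch of the selection logic above, outside the model class.
import torch

def pick_mask_token(input_ids: torch.Tensor, mask_id: int, gmask_id: int):
    """Prefer gMASK when present, mirroring the new branch in the diff."""
    use_gmask = gmask_id in input_ids
    mask_token = gmask_id if use_gmask else mask_id
    return mask_token, use_gmask

ids = torch.tensor([[5, 130001, 7]])         # contains gMASK (130001)
print(pick_mask_token(ids, 130000, 130001))  # (130001, True)

# With the old hard-coded 150000/150001, neither id can appear in the
# slimmed vocab, so seq.index(mask_token) would raise ValueError.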
tokenization_chatglm.py

@@ -48,11 +48,13 @@ class SPTokenizer:
     def __init__(
             self,
             vocab_file,
+            num_image_tokens=20000,
             max_blank_length=80,
             byte_fallback=True,
     ):
         assert vocab_file is not None
         self.vocab_file = vocab_file
+        self.num_image_tokens = num_image_tokens
         self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
         self.max_blank_length = max_blank_length
         self.byte_fallback = byte_fallback
@@ -70,10 +72,6 @@ class SPTokenizer:
     def get_tab_token():
         return f"<|tab|>"
 
-    @property
-    def num_image_tokens(self):
-        return 20000
-
     @property
     def num_text_tokens(self):
         return self.text_tokenizer.num_tokens
@@ -178,6 +176,7 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
             mask_token='[MASK]',
             gmask_token='[gMASK]',
             padding_side="left",
+            num_image_tokens=20000,
             **kwargs
     ) -> None:
         super().__init__(
@@ -197,10 +196,16 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
         self.mask_token = mask_token
         self.gmask_token = gmask_token
 
-        self.sp_tokenizer = SPTokenizer(vocab_file)
+        self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)
 
         """ Initialisation """
 
+    @property
+    def gmask_token_id(self) -> Optional[int]:
+        if self.gmask_token is None:
+            return None
+        return self.convert_tokens_to_ids(self.gmask_token)
+
     @property
     def eop_token_id(self) -> Optional[int]:
         """
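num_image_tokens was previously a read-only property pinned to 20000; it is now threaded from ChatGLMTokenizer into SPTokenizer, so tokenizer_config.json can set it per checkpoint. A usage sketch, assuming the repo's tokenization_chatglm.py and its SentencePiece vocab (ice_text.model in the upstream repo) sit in the working directory:

# Sketch: direct construction with the slimmed-vocab setting.
from tokenization_chatglm import ChatGLMTokenizer

tok = ChatGLMTokenizer(vocab_file="ice_text.model", num_image_tokens=0)
print(tok.sp_tokenizer.num_image_tokens)  # 0: no image-token id offset
print(tok.gmask_token_id)                 # resolved via the new property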
tokenizer_config.json

@@ -1,5 +1,5 @@
 {
-  "name_or_path": "THUDM/chatglm-6b",
+  "name_or_path": "THUDM/chatglm-6b-int4",
   "bos_token": "<sop>",
   "eop_token": "<eop>",
   "eos_token": "</s>",
@@ -10,6 +10,7 @@
   "remove_space": false,
   "do_lower_case": false,
   "tokenizer_class": "ChatGLMTokenizer",
+  "num_image_tokens": 0,
   "auto_map": {
     "AutoTokenizer": [
       "tokenization_chatglm.ChatGLMTokenizer",
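End to end, the new "num_image_tokens": 0 entry flows through the tokenizer kwargs into the constructor shown above. A loading sketch via the auto_map (assumes transformers is installed and the Hub is reachable):

# Sketch: loading through auto_map with remote code enabled.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
# tokenizer_config.json now carries num_image_tokens: 0, so the tokenizer
# is constructed without the 20,000-id image-token offset.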