diff --git a/modeling_chatglm.py b/modeling_chatglm.py
index e2ff3f2..ef38154 100644
--- a/modeling_chatglm.py
+++ b/modeling_chatglm.py
@@ -1243,7 +1243,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             for i, (old_query, response) in enumerate(history):
                 prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
             prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
-        inputs = tokenizer([prompt], return_tensors="pt", padding=True)
+        inputs = tokenizer([prompt], return_tensors="pt")
         inputs = inputs.to(self.device)
         outputs = self.generate(**inputs, **gen_kwargs)
         outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
@@ -1269,7 +1269,7 @@ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
             for i, (old_query, response) in enumerate(history):
                 prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
             prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
-        inputs = tokenizer([prompt], return_tensors="pt", padding=True)
+        inputs = tokenizer([prompt], return_tensors="pt")
         inputs = inputs.to(self.device)
         for outputs in self.stream_generate(**inputs, **gen_kwargs):
             outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
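
Note: both chat() and stream_chat() tokenize a single-element batch, and padding=True only pads to the longest sequence in the batch, so with one prompt it never added any tokens; dropping it leaves the encoded inputs unchanged while avoiding the tokenizer's padding path entirely. A minimal usage sketch of the two patched methods follows, assuming the public THUDM/chatglm-6b checkpoint and a CUDA device; both are illustrative assumptions, not part of this patch.

# Minimal sketch, assuming the THUDM/chatglm-6b checkpoint and a CUDA device
# (both assumptions for illustration, not part of this change).
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda().eval()

# chat() builds the "[Round N]" prompt from the history, tokenizes the single
# prompt (now without padding=True), and returns the decoded response plus the
# updated history.
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)

# stream_chat() follows the same tokenization path but yields partial
# (response, history) pairs as generation proceeds.
for response, history in model.stream_chat(tokenizer, "How are you?", history=history):
    pass  # each iteration carries the response decoded so far
print(response)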