chatglm-6b-int4/handler.py

24 lines
815 B
Python
Raw Normal View History

2023-03-29 14:21:13 +00:00
from typing import Dict, List, Any
from transformers import AutoTokenizer, AutoModel
import torch
class EndpointHandler:
    """Hugging Face Inference Endpoints handler for the ChatGLM-6B (int4) chat model."""

    def __init__(self, path=""):
        # Load tokenizer and model from the endpoint's local checkpoint path.
        # trust_remote_code=True is required: ChatGLM ships its own modeling code
        # inside the checkpoint. NOTE(review): remote code executes arbitrary Python
        # from the checkpoint — acceptable only because the checkpoint is this
        # endpoint's own artifact, not user-supplied.
        self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        # .float() casts to fp32 so the int4 checkpoint also runs on CPU
        # (the quantized kernels otherwise target CUDA), trading speed/memory.
        self.model = AutoModel.from_pretrained(path, trust_remote_code=True).float()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        """
        Run one chat turn.

        Args:
            data (:dict:):
                The payload with the text prompt under "inputs" and, optionally,
                prior conversation turns under "history".

        Returns:
            A single-element list: ``[{"generated_text": <model response>}]``,
            matching the Inference Endpoints text-generation response shape.
        """
        # process input — fall back to the raw payload if no "inputs" key is present
        inputs = data.pop("inputs", data)
        history = data.pop("history", None)  # ChatGLM's chat() treats None as "no history"
        # model.chat is defined by the checkpoint's remote code and returns
        # (response_text, updated_history). The updated history is discarded here,
        # so callers must resend the full history on every request.
        response, _new_history = self.model.chat(self.tokenizer, inputs, history)
        # Fix: the annotation previously claimed Dict[str, str], but a list of
        # dicts is what is actually returned.
        return [{"generated_text": response}]