From e36785d6285ef858c4692be523f4e35deb3b9102 Mon Sep 17 00:00:00 2001
From: weisong
Date: Wed, 24 Jul 2024 15:55:39 +0800
Subject: [PATCH] Update model.py

---
 model.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/model.py b/model.py
index 19fc9c9..0c778c7 100644
--- a/model.py
+++ b/model.py
@@ -7,7 +7,7 @@ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIt
 model_id = 'meta-llama/Llama-2-7b-chat-hf'
 
 if torch.cuda.is_available():
-    config = AutoConfig.from_pretrained(model_id, use_auth_token ='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')
+    config = AutoConfig.from_pretrained(model_id, use_auth_token ='hf_*****************************')
     config.pretraining_tp = 1
     model = AutoModelForCausalLM.from_pretrained(
         model_id,
@@ -15,11 +15,11 @@ if torch.cuda.is_available():
         torch_dtype=torch.float16,
         load_in_4bit=True,
         device_map='auto',
-        use_auth_token ='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'
+        use_auth_token ='hf_******************************************'
     )
 else:
     model = None
-tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token ='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')
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token ='hf_******************************')
 
 
 def get_prompt(message: str, chat_history: list[tuple[str, str]],
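Note (editorial, not part of the patch): this change only masks the token string in place. A common alternative is to read the token from the environment so no credential ever appears in the source. The sketch below is an assumption, not the author's code: it reuses the structure visible in the hunks above, introduces a hypothetical HF_TOKEN environment variable, and keeps the use_auth_token keyword the file already uses (newer transformers releases prefer token=).

import os

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_id = 'meta-llama/Llama-2-7b-chat-hf'

# Assumption: the token is supplied via an HF_TOKEN environment variable
# (e.g. `export HF_TOKEN=hf_xxx`) rather than hardcoded in the file.
hf_token = os.environ.get('HF_TOKEN')

if torch.cuda.is_available():
    config = AutoConfig.from_pretrained(model_id, use_auth_token=hf_token)
    config.pretraining_tp = 1
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        config=config,
        torch_dtype=torch.float16,
        load_in_4bit=True,          # requires bitsandbytes
        device_map='auto',
        use_auth_token=hf_token,
    )
else:
    model = None

tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_token)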