diff --git a/model.py b/model.py
index 19fc9c9..0c778c7 100644
--- a/model.py
+++ b/model.py
@@ -7,7 +7,7 @@ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, TextIt
 model_id = 'meta-llama/Llama-2-7b-chat-hf'
 
 if torch.cuda.is_available():
-    config = AutoConfig.from_pretrained(model_id, use_auth_token ='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')
+    config = AutoConfig.from_pretrained(model_id, use_auth_token ='hf_*****************************')
     config.pretraining_tp = 1
     model = AutoModelForCausalLM.from_pretrained(
         model_id,
@@ -15,11 +15,11 @@ if torch.cuda.is_available():
         torch_dtype=torch.float16,
         load_in_4bit=True,
         device_map='auto',
-        use_auth_token ='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'
+        use_auth_token ='hf_******************************************'
     )
 else:
     model = None
 
-tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token ='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')
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token ='hf_******************************')
 
 def get_prompt(message: str, chat_history: list[tuple[str, str]],
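For reference, a minimal sketch of how the same loading code could avoid embedding the token in the source at all, assuming the secret is supplied through an environment variable (the name `HF_TOKEN` is chosen here for illustration and would need to be set in the deployment environment):

```python
import os

from transformers import AutoConfig, AutoTokenizer

model_id = 'meta-llama/Llama-2-7b-chat-hf'

# Read the access token from the environment instead of hardcoding it,
# so it never appears in commits or diffs. Raises KeyError if unset.
hf_token = os.environ['HF_TOKEN']

config = AutoConfig.from_pretrained(model_id, use_auth_token=hf_token)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_token)
```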