research14 committed on
Commit
21090d3
·
1 Parent(s): d067fae

model change llama

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -9,8 +9,8 @@ vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
9
  vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
10
 
11
  # Load the LLaMA 7b model and tokenizer
12
- llama_tokenizer = AutoTokenizer.from_pretrained("luodian/llama-7b-hf")
13
- llama_model = AutoModelForCausalLM.from_pretrained("luodian/llama-7b-hf")
14
 
15
  template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
16
 
 
9
  vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
10
 
11
  # Load the LLaMA 7b model and tokenizer
12
+ llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
13
+ llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf")
14
 
15
  template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
16