omeryentur committed on
Commit
ce7ef81
·
verified ·
1 Parent(s): 25869c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -7,8 +7,8 @@ from peft import (
7
  prepare_model_for_kbit_training,
8
  get_peft_model,
9
  )
10
- model_name = "google/gemma-2-2b-it"
11
- lora_model_name="Anlam-Lab/gemma-2-2b-it-anlamlab-SA-Chatgpt4mini"
12
 
13
  tokenizer = AutoTokenizer.from_pretrained(model_name)
14
  model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
@@ -32,7 +32,7 @@ def generate_response(input_text):
32
  )
33
 
34
  response = tokenizer.decode(outputs[0])
35
- return response.split("<start_of_turn>model\n")[1].split("<end_of_turn>")[0]
36
 
37
  iface = gr.Interface(
38
  fn=generate_response,
 
7
  prepare_model_for_kbit_training,
8
  get_peft_model,
9
  )
10
+ model_name = "meta-llama/Llama-3.2-1B"
11
+ lora_model_name="Anlam-Lab/Llama-3.2-1B-it-anlamlab-SA-Chatgpt4mini"
12
 
13
  tokenizer = AutoTokenizer.from_pretrained(model_name)
14
  model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
 
32
  )
33
 
34
  response = tokenizer.decode(outputs[0])
35
+ return response.split("<|end_header_id|>")[-1].split("<|eot_id|>")[0]
36
 
37
  iface = gr.Interface(
38
  fn=generate_response,