nroggendorff committed on
Commit
f086864
·
verified ·
1 Parent(s): d110186

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -0
app.py CHANGED
@@ -10,8 +10,17 @@ model = AutoModelForCausalLM.from_pretrained(model_path)
10
 
11
  model.config.pad_token_id = model.config.eos_token_id
12
 
 
 
 
 
 
 
 
 
13
  @spaces.GPU(duration=120)
14
  def chat(prompt):
 
15
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
16
  attention_mask = torch.ones_like(input_ids)
17
 
@@ -27,6 +36,7 @@ def chat(prompt):
27
  )
28
 
29
  response = tokenizer.decode(output[0], skip_special_tokens=True)
 
30
  return response
31
 
32
  demo = gr.Interface(
 
10
 
11
  model.config.pad_token_id = model.config.eos_token_id
12
 
13
+ system_prompt = """
14
+ You are Santa.
15
+ """
16
+
17
+ system_prompt_ml = f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
18
+
19
+ chats = system_prompt_ml
20
+
21
  @spaces.GPU(duration=120)
22
  def chat(prompt):
23
+ input = chats + "<|im_start|>assistant\n"
24
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
25
  attention_mask = torch.ones_like(input_ids)
26
 
 
36
  )
37
 
38
  response = tokenizer.decode(output[0], skip_special_tokens=True)
39
+ chats += f"<|im_start|>user\n{prompt}<|im_end|>\n" + "<|im_start|>assistant\n{response}<|im_end|>\n"
40
  return response
41
 
42
  demo = gr.Interface(