thunder-007 committed
Commit 4e97e1e · verified · 1 Parent(s): cfe1bf5

Update app.py

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -5,14 +5,14 @@ from llm import Gemma2B
 llm = Gemma2B()
 
 
-def echo(message, history, system_prompt, tokens):
+def inference(message, history, system_prompt, tokens):
     chat_template = []
     for user, model in history:
         chat_template = [
             {"role": "user", "content": user},
             {"role": "model", "content": model},
         ]
-    response = llm.inference_quantized_4bit(
+    response = llm.inference_cpu(
         chat_template + [{"role": "user", "content": message}
     ]).split("<start_of_turn>")[-1].strip("model").strip("<eos>")
     for i in range(max(len(response), int(tokens))):
@@ -20,7 +20,7 @@ def echo(message, history, system_prompt, tokens):
         yield response[: i + 1]
 
 
-demo = gr.ChatInterface(echo,
+demo = gr.ChatInterface(inference,
     additional_inputs=[
         gr.Textbox("You are helpful AI.", label="System Prompt"),
         gr.Slider(10, 200, 100, label="Tokens")
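
For reference, below is a minimal, self-contained sketch of app.py as it stands after this commit. Gemma2B and its inference_cpu method are this repo's own wrappers from the llm module and are assumed to exist with the signatures shown; gradio supplies ChatInterface. Two details of the committed code look unintended and are repaired in the sketch as guesses, not as what the commit actually does: the history loop reassigns chat_template on every turn (keeping only the last exchange) where += would accumulate the whole conversation, and max(len(response), int(tokens)) lets the stream run past the Tokens cap where min() would enforce it.

# Hypothetical reconstruction, not the verbatim file: Gemma2B / inference_cpu
# come from this repo's llm.py and are assumed to exist as used here.
import gradio as gr

from llm import Gemma2B

llm = Gemma2B()


def inference(message, history, system_prompt, tokens):
    # system_prompt is accepted but unused in the committed code; kept for parity.
    chat_template = []
    for user, model in history:
        # += accumulates every past turn; the commit's `=` kept only the
        # most recent exchange each iteration.
        chat_template += [
            {"role": "user", "content": user},
            {"role": "model", "content": model},
        ]
    response = llm.inference_cpu(
        chat_template + [{"role": "user", "content": message}]
    ).split("<start_of_turn>")[-1].strip("model").strip("<eos>")
    # Yield one more character per step; min() (rather than the commit's
    # max()) makes the Tokens slider an actual upper bound on output length.
    for i in range(min(len(response), int(tokens))):
        yield response[: i + 1]


demo = gr.ChatInterface(
    inference,
    additional_inputs=[
        gr.Textbox("You are helpful AI.", label="System Prompt"),
        gr.Slider(10, 200, 100, label="Tokens"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Because inference is a generator, ChatInterface streams each yielded prefix to the UI, so the reply appears to type itself out character by character.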