Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -5,14 +5,14 @@ from llm import Gemma2B
|
|
5 |
llm = Gemma2B()
|
6 |
|
7 |
|
8 |
-
def
|
9 |
chat_template = []
|
10 |
for user, model in history:
|
11 |
chat_template = [
|
12 |
{"role": "user", "content": user},
|
13 |
{"role": "model", "content": model},
|
14 |
]
|
15 |
-
response = llm.
|
16 |
chat_template + [{"role": "user", "content": message}
|
17 |
]).split("<start_of_turn>")[-1].strip("model").strip("<eos>")
|
18 |
for i in range(max(len(response), int(tokens))):
|
@@ -20,7 +20,7 @@ def echo(message, history, system_prompt, tokens):
|
|
20 |
yield response[: i + 1]
|
21 |
|
22 |
|
23 |
-
demo = gr.ChatInterface(
|
24 |
additional_inputs=[
|
25 |
gr.Textbox("You are helpful AI.", label="System Prompt"),
|
26 |
gr.Slider(10, 200, 100, label="Tokens")
|
|
|
5 |
llm = Gemma2B()  # instantiate the Gemma-2B model once at module import; reused by every call to inference()
|
6 |
|
7 |
|
8 |
def inference(message, history, system_prompt, tokens):
    """Stream a Gemma-2B reply for *message*, replaying the chat history.

    Args:
        message: The latest user message (str).
        history: List of ``(user, model)`` message pairs from prior turns
            (Gradio ChatInterface tuple format).
        system_prompt: Accepted from the UI but currently NOT forwarded to
            the model.  TODO(review): prepend it to the prompt if Gemma's
            chat template supports a system turn.
        tokens: Maximum number of characters to stream back (the "Tokens"
            slider value; may arrive as float, hence ``int(tokens)``).

    Yields:
        Progressively longer prefixes of the model's reply, one character
        per step, for the streaming chat UI.
    """
    # Accumulate ALL prior turns.  The original code rebound the list
    # (``chat_template = [...]``) on every loop iteration, so only the
    # final history pair ever reached the model.
    chat_template = []
    for user, model in history:
        chat_template += [
            {"role": "user", "content": user},
            {"role": "model", "content": model},
        ]
    # Run the model on CPU and strip Gemma's turn markers from the output.
    response = llm.inference_cpu(
        chat_template + [{"role": "user", "content": message}]
    ).split("<start_of_turn>")[-1].strip("model").strip("<eos>")
    # Cap the stream at ``tokens`` characters.  The original used max(),
    # which streamed past the end of ``response`` whenever the slider
    # exceeded its length (slicing clamps, so the full reply was yielded
    # over and over); min() honors the slider as an upper bound.
    for i in range(min(len(response), int(tokens))):
        yield response[: i + 1]
|
21 |
|
22 |
|
23 |
+
demo = gr.ChatInterface(inference,
|
24 |
additional_inputs=[
|
25 |
gr.Textbox("You are helpful AI.", label="System Prompt"),
|
26 |
gr.Slider(10, 200, 100, label="Tokens")
|