Spaces: Runtime error
john committed
Commit b3f3207 · 1 Parent(s): d2c9268
Update app.py
app.py CHANGED
@@ -3,24 +3,25 @@ import wget
 import gradio as gr
 from llama_cpp import Llama
 import random
-
+
+url = 'https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q2_K.bin'
 filename = wget.download(url)
 llm = Llama(model_path=filename, seed=random.randint(1, 2**31))
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
     msg = gr.Textbox()
-    clear = gr.ClearButton([msg, chatbot])
-    #instruction = gr.Textbox(label="Instruction", placeholder=)
+    clear = gr.UploadButton([msg, chatbot]) # Replace gr.ClearButton with gr.UploadButton
+    # instruction = gr.Textbox(label="Instruction", placeholder="")
 
     def user(user_message, history):
         return gr.update(value="", interactive=True), history + [[user_message, None]]
 
     def bot(history):
-        #instruction = history[-1][1] or ""
+        # instruction = history[-1][1] or ""
         user_message = history[-1][0]
-        #token1 = llm.tokenize(b"### Instruction: ")
-        #token2 = llm.tokenize(instruction.encode())
+        # token1 = llm.tokenize(b"### Instruction: ")
+        # token2 = llm.tokenize(instruction.encode())
         token3 = llm.tokenize(b"### Input: ")
         tokens3 = llm.tokenize(user_message.encode())
         token4 = llm.tokenize(b"### Response:")
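The hunk ends at the prompt tokenization, so this commit does not show how bot() turns those tokens into a reply or how the Gradio events are wired. What follows is a hedged sketch only, not code from this Space: the streaming loop, the sampling parameters, the msg.submit(...).then(...) chain, and the demo.queue()/demo.launch() calls are assumptions about how such an app is usually completed with llama-cpp-python's token-level API (tokenize / generate / detokenize) and Gradio Blocks; only the model URL, the Llama setup, and the "### Input: " / "### Response:" prompt pieces are taken from the diff itself.

# Hedged sketch -- not part of this commit. Only the URL, the Llama setup and the
# "### Input: " / "### Response:" pieces come from the diff; the streaming loop,
# sampling parameters and event wiring below are assumptions.
import random

import gradio as gr
import wget
from llama_cpp import Llama

url = 'https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q2_K.bin'
filename = wget.download(url)
llm = Llama(model_path=filename, seed=random.randint(1, 2**31))

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()

    def user(user_message, history):
        # Empty the textbox and append the user turn with a pending bot slot.
        return gr.update(value="", interactive=True), history + [[user_message, None]]

    def bot(history):
        user_message = history[-1][0]
        # Alpaca-style prompt, tokenized piece by piece as in the diff.
        tokens = (
            llm.tokenize(b"### Input: ")
            + llm.tokenize(user_message.encode())
            + llm.tokenize(b"### Response:")
        )
        history[-1][1] = ""
        # Stream tokens until EOS, appending detokenized text to the last turn.
        for token in llm.generate(tokens, top_k=40, top_p=0.95, temp=0.8):
            if token == llm.token_eos():
                break
            history[-1][1] += llm.detokenize([token]).decode("utf-8", errors="ignore")
            yield history

    # Record the user turn first, then stream the model's reply into the chat.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

demo.queue()
demo.launch()

One detail worth noting: the per-piece tokenize() calls mirror the diff, and in llama-cpp-python each call prepends a BOS token by default; if that matters for the prompt format, it can be turned off with add_bos=False on the later pieces.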