Commit b93be81
Parent(s): 2b07352
changed output to only bot message
app.py CHANGED
@@ -56,7 +56,7 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     return "", chat_history
 
 def vicuna_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
     print('Prompt + Context:')
     print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -66,10 +66,10 @@ def vicuna_respond(tab_name, message, chat_history):
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
-    return tab_name, "",
+    return tab_name, "", bot_message
 
 def llama_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
     print('Prompt + Context:')
     print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -79,7 +79,7 @@ def llama_respond(tab_name, message, chat_history):
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
-    return tab_name, "",
+    return tab_name, "", bot_message
 
 def interface():
     gr.Markdown(" Description ")
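Functionally, the commit does two things: the prompts now wrap the interpolated values in angle brackets (`<{tab_name}>`, `<{message}>`) so the model can see where the task label and the user sentence begin and end, and both handlers now return the bot's reply as an additional value. In Gradio, each value a handler returns is routed, in order, to one component in the event's `outputs` list, which is what lets the UI show only the bot message. A minimal sketch of that wiring, assuming the usual Blocks setup; the component names and `build_demo` helper here are hypothetical, not taken from the Space:

import gradio as gr

# Hypothetical wiring sketch: vicuna_respond(tab_name, message, chat_history)
# now returns (tab_name, "", bot_message) -- the tab label, an empty string
# that clears the input textbox, and only the bot's reply text.
def build_demo(vicuna_respond):
    with gr.Blocks() as demo:
        tab_name = gr.Textbox(label="Tab name")        # e.g. "nouns"
        message = gr.Textbox(label="Sentence")         # user input to analyze
        chat_history = gr.State([])                    # mutated in place by the handler
        bot_message = gr.Textbox(label="Bot message")  # shows only the reply

        submit = gr.Button("Submit")
        # Each returned value maps, in order, onto one output component,
        # so the handler's three-tuple fills exactly these three slots.
        submit.click(
            vicuna_respond,
            inputs=[tab_name, message, chat_history],
            outputs=[tab_name, message, bot_message],
        )
    return demo

Under this assumption, the pre-change `return tab_name, "",` left the reply component unfilled, which matches the commit message: the visible output is now only the bot message.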