Commit 971271a
1 Parent(s): 95980d0
added names to print
app.py
CHANGED
@@ -57,7 +57,7 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
 
 def vicuna_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
-    print('Prompt + Context:')
+    print('Vicuna - Prompt + Context:')
     print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=149, num_beams=5, no_repeat_ngram_size=2)
@@ -70,7 +70,7 @@ def vicuna_respond(tab_name, message, chat_history):
 
 def llama_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
-    print('Prompt + Context:')
+    print('Llama - Prompt + Context:')
     print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, do_sample=True, max_length=149, num_beams=5, no_repeat_ngram_size=2)
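Both respond functions follow the same Transformers encode → generate → decode pattern; the hunks above only show the encode and generate lines, and the commit simply prefixes each debug print with the model name so the logs for the two models can be told apart. Below is a minimal runnable sketch of that pattern for context. The checkpoint name, the decode step, and the chat-history handling are assumptions, since they fall outside the diff shown here:

```python
# Sketch of the generate pattern used by vicuna_respond / llama_respond.
# Checkpoint name and the decode/history steps are assumed, not taken from the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "lmsys/vicuna-7b-v1.5"  # assumed checkpoint; not shown in this commit
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def respond(tab_name, message, chat_history):
    formatted_prompt = (
        f"Generate the output only for the assistant. Please output any {tab_name} "
        f"in the following sentence one per line without any additional text: {message}"
    )
    print("Vicuna - Prompt + Context:")  # the model-name label this commit adds
    print(formatted_prompt)
    input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt")
    # Same generation settings as the diff: max_length caps prompt + new tokens.
    output_ids = model.generate(
        input_ids,
        do_sample=True,
        max_length=149,
        num_beams=5,
        no_repeat_ngram_size=2,
    )
    # Assumed continuation: decode the output and append the turn to the history.
    bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    chat_history.append((message, bot_message))
    return "", chat_history
```

Labeling each print with the model name is a small but useful change here: since the Vicuna and Llama functions emit identical "Prompt + Context:" lines, interleaved logs from the two models were previously indistinguishable.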