Commit · d5b5de9
1 Parent(s): 6033369
added prints to debug

app.py CHANGED
@@ -181,16 +181,16 @@ def llama_strategies_respond(strategy, task_name, task_ling_ent, message, chat_h
         demon_chunk = f.read()
         formatted_prompt = f'''"{demon_chunk}". Using the Chunking structure above, Chunk the following sentence: "{message}"'''
 
-
-
+        print('Llama Strategies - Prompt + Context:')
+        print(formatted_prompt)
         input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
         output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
         bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
+        print(bot_message)
 
         # Remove formatted prompt from bot_message
         bot_message = bot_message.replace(formatted_prompt, '')
-
+        print(bot_message)
 
         chat_history.insert(0, (formatted_prompt, bot_message))
         time.sleep(2)
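For context, below is a minimal standalone sketch of the flow these debug prints trace (prompt assembly, tokenize, generate, decode, strip the prompt back out). The model name, demonstration text, and test sentence are placeholders for illustration only; in app.py the Llama tokenizer and model are loaded elsewhere and demon_chunk is read from a file.

# Minimal sketch, not the Space's code: reproduce the prompt -> generate -> decode flow
# with a small stand-in checkpoint so the new debug prints can be observed locally.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder; app.py loads its own Llama tokenizer/model
llama_tokenizer = AutoTokenizer.from_pretrained(model_name)
llama_model = AutoModelForCausalLM.from_pretrained(model_name)

demon_chunk = "Example chunking demonstrations."          # read from a file in app.py
message = "The quick brown fox jumps over the lazy dog."  # user message
formatted_prompt = f'''"{demon_chunk}". Using the Chunking structure above, Chunk the following sentence: "{message}"'''

print('Llama Strategies - Prompt + Context:')
print(formatted_prompt)  # first debug print: full prompt with the chunking demonstrations

input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024,
                                  num_beams=5, no_repeat_ngram_size=2)
bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
print(bot_message)       # second debug print: prompt plus the model's continuation

# Remove formatted prompt from bot_message
bot_message = bot_message.replace(formatted_prompt, '')
print(bot_message)       # third debug print: continuation only

Since these are throwaway debugging statements, plain print() is reasonable; Python's logging module would be the longer-term alternative if the output ever needs levels or timestamps in the Space's logs.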