jchen8000 committed
Commit 617ca15 · verified · 1 Parent(s): ef4cfc3

Update app.py

Files changed (1)
  1. app.py +12 -1
app.py CHANGED
@@ -92,7 +92,18 @@ def generate_response(query, history, model, temperature, max_tokens, top_p, see
     retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 16})
     llm = ChatGroq(groq_api_key=os.environ.get("GROQ_API_KEY"), model=model)
     custom_rag_prompt = PromptTemplate.from_template(template)
-
+
+
+    # Step 1: Prepare inputs manually
+    docs = retriever.invoke(query)
+    context = format_docs(docs)
+    inputs = {"context": context, "question": query}
+
+    # Step 2: Get the final prompt string
+    prompt_value = custom_rag_prompt.invoke(inputs)
+    final_prompt = prompt_value.to_string()
+    print("Final Prompt Sent to LLM:\n", final_prompt)
+
     rag_chain = (
         {"context": retriever | format_docs, "question": RunnablePassthrough()}
         | custom_rag_prompt
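
For reference, here is a minimal standalone sketch of the prompt-inspection step this commit adds. It uses the same PromptTemplate.invoke(...).to_string() pattern to render the final prompt string before any LLM call; the template text, documents, and query below are placeholders, since the real retriever, format_docs, and template are defined elsewhere in app.py.

# Minimal sketch of the debug step above; template, docs, and query are placeholder values.
from langchain_core.prompts import PromptTemplate

template = (
    "Answer the question using only the context below.\n\n"
    "Context:\n{context}\n\n"
    "Question: {question}\n"
)
custom_rag_prompt = PromptTemplate.from_template(template)

# Stand-ins for retriever.invoke(query) and format_docs(docs) in app.py
docs = ["First retrieved passage.", "Second retrieved passage."]
context = "\n\n".join(docs)
query = "What does this app do?"

# Step 1: Prepare inputs manually
inputs = {"context": context, "question": query}

# Step 2: Render the final prompt string and log it before invoking the LLM
prompt_value = custom_rag_prompt.invoke(inputs)
final_prompt = prompt_value.to_string()
print("Final Prompt Sent to LLM:\n", final_prompt)

Note that with this approach the prompt is effectively built twice: once manually for the print statement, and once again by rag_chain (retriever | format_docs into custom_rag_prompt) when the chain itself is invoked.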