Shreyas094 committed on
Commit
2653adc
1 Parent(s): 570f979

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -586,7 +586,7 @@ def get_response_from_pdf(query, model, selected_docs, num_calls=3, temperature=
586
  # Use Hugging Face API
587
  messages = [
588
  {"role": "system", "content": "You are a highly specialized financial analyst assistant with expertise in analyzing and summarizing financial documents. Your goal is to provide accurate, detailed, and precise summaries based on the context provided. Avoid making assumptions or adding information that is not explicitly supported by the context from the PDF documents."},
589
- {"role": "user", "content": f"Using the following context from the PDF documents:\n{context_str}\n\nPlease generate a comprehensive and accurate summary addressing the following question: '{query}'. Ensure your response is strictly based on the provided context, highlighting key financial metrics, trends, and significant details relevant to the query. Avoid any speculative or unverified information."}
590
  ]
591
 
592
  client = InferenceClient(model, token=huggingface_token)
 
586
  # Use Hugging Face API
587
  messages = [
588
  {"role": "system", "content": "You are a highly specialized financial analyst assistant with expertise in analyzing and summarizing financial documents. Your goal is to provide accurate, detailed, and precise summaries based on the context provided. Avoid making assumptions or adding information that is not explicitly supported by the context from the PDF documents."},
589
+ {"role": "user", "content": f"Using the following context from the PDF documents:\n{context_str}\n\nPlease generate a step-by-step reasoning before arriving at a comprehensive and accurate summary addressing the following question: '{query}'. Ensure your response is strictly based on the provided context, highlighting key financial metrics, trends, and significant details relevant to the query. Avoid any speculative or unverified information."}
590
  ]
591
 
592
  client = InferenceClient(model, token=huggingface_token)