Shreyas094 committed on
Commit
e1a8672
·
verified ·
1 Parent(s): 3fd1094

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -51,8 +51,13 @@ async def get_response_with_search(query, model, use_embeddings, num_calls=3, te
51
  else:
52
  context = "\n".join([f"{result['title']}\n{result['body']}\nSource: {result['href']}" for result in search_results])
53
 
54
- prompt = f"""Using the following context from web search results:
 
 
 
 
55
  {context}
 
56
  Write a detailed and complete research document that fulfills the following user request: '{query}'
57
  After writing the document, please provide a list of sources used in your response."""
58
 
@@ -63,7 +68,10 @@ After writing the document, please provide a list of sources used in your respon
63
  try:
64
  for _ in range(num_calls):
65
  for response in client.chat_completion(
66
- messages=[{"role": "user", "content": prompt}],
 
 
 
67
  max_tokens=6000,
68
  temperature=temperature,
69
  stream=True,
 
51
  else:
52
  context = "\n".join([f"{result['title']}\n{result['body']}\nSource: {result['href']}" for result in search_results])
53
 
54
+ system_message = """You are a highly knowledgeable AI assistant tasked with providing comprehensive and accurate information based on web search results.
55
+ Your goal is to synthesize the given context into a coherent and detailed response that directly addresses the user's query.
56
+ Please ensure that your response is well-structured, factual, and cites sources where appropriate."""
57
+
58
+ user_message = f"""Using the following context from web search results:
59
  {context}
60
+
61
  Write a detailed and complete research document that fulfills the following user request: '{query}'
62
  After writing the document, please provide a list of sources used in your response."""
63
 
 
68
  try:
69
  for _ in range(num_calls):
70
  for response in client.chat_completion(
71
+ messages=[
72
+ {"role": "system", "content": system_message},
73
+ {"role": "user", "content": user_message}
74
+ ],
75
  max_tokens=6000,
76
  temperature=temperature,
77
  stream=True,