Shreyas094 committed (verified)
Commit 08ba31b · Parent(s): 5a2c5c8

Update app.py

Files changed (1):
  1. app.py (+16 -7)
app.py CHANGED
@@ -231,6 +231,7 @@ def respond(message, history, model, temperature, num_calls, use_web_search):
             logging.info(f"Generated Response (first line): {first_line}")
             yield response
     else:
+        # PDF search logic (unchanged)
         if model == "@cf/meta/llama-3.1-8b-instruct":
             # Use Cloudflare API
             embed = get_embeddings()
@@ -330,10 +331,10 @@ def split_content_and_sources(text):
 
 def get_response_with_search(query, model, num_calls=3, temperature=0.2):
     search_results = duckduckgo_search(query)
-    context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
+    context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['title']}"
                         for result in search_results if 'body' in result)
 
-    prompt = f"""Using the following context:
+    prompt = f"""Using the following context from web search results:
 {context}
 Write a detailed and complete research document that fulfills the following user request: '{query}'
 After writing the document, please provide a list of sources used in your response."""
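For reference, here is what the reworded context construction produces. The sample results below are fabricated; the diff only shows that duckduckgo_search results carry 'title', 'body', and 'href' keys. Note that after this change each Source: line cites the result's title rather than its href/URL:

# Illustrative only: fabricated sample results standing in for duckduckgo_search output.
search_results = [
    {"title": "Page A", "body": "Summary A", "href": "https://a.example"},
    {"title": "Page B", "body": "Summary B", "href": "https://b.example"},
]
context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['title']}"
                    for result in search_results if 'body' in result)
print(context)
# Page A
# Summary A
# Source: Page A
# Page B
# Summary B
# Source: Page B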
@@ -341,12 +342,12 @@ After writing the document, please provide a list of sources used in your respon
     if model == "@cf/meta/llama-3.1-8b-instruct":
         # Use Cloudflare API
         for main_content, sources in get_response_from_cloudflare(prompt="", context=context, query=query, num_calls=num_calls, temperature=temperature, search_type="web"):
-            yield main_content, sources
+            yield main_content, sources, search_results
     else:
-        # Use Hugging Face API
+        # Use Hugging Face API for other models
         client = InferenceClient(model, token=huggingface_token)
 
-        main_content = ""
+        full_response = ""
         for i in range(num_calls):
             for message in client.chat_completion(
                 messages=[{"role": "user", "content": prompt}],
@@ -356,8 +357,16 @@ After writing the document, please provide a list of sources used in your respon
             ):
                 if message.choices and message.choices[0].delta and message.choices[0].delta.content:
                     chunk = message.choices[0].delta.content
-                    main_content += chunk
-                    yield main_content, ""  # Yield partial main content without sources
+                    full_response += chunk
+                    main_content, sources = split_content_and_sources(full_response)
+                    yield main_content, sources, search_results
+
+def split_content_and_sources(text):
+    parts = text.split("Sources:", 1)
+    if len(parts) > 1:
+        return parts[0].strip(), "Sources:" + parts[1].strip()
+    else:
+        return text.strip(), ""
 
 def get_response_from_pdf(query, model, num_calls=3, temperature=0.2):
     embed = get_embeddings()
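The newly appended split_content_and_sources helper splits the streamed text on the first "Sources:" marker. A quick check of its behavior (inputs fabricated):

def split_content_and_sources(text):
    # As committed: split once on the first "Sources:" marker.
    parts = text.split("Sources:", 1)
    if len(parts) > 1:
        return parts[0].strip(), "Sources:" + parts[1].strip()
    else:
        return text.strip(), ""

# Partial stream with no marker yet: everything is main content, sources is empty.
print(split_content_and_sources("The report begins..."))
# -> ('The report begins...', '')

# Once the marker arrives, the remainder is returned as the sources block.
# Note that strip() eats the newline right after the colon.
print(split_content_and_sources("The report.\n\nSources:\n1. https://example.com"))
# -> ('The report.', 'Sources:1. https://example.com')

One thing the hunk header at old line 330 reveals: a split_content_and_sources was already defined earlier in the file, so this appended module-level definition will shadow the earlier one (the later def wins at import time).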
 
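One consequence worth flagging for callers: both branches of get_response_with_search now yield three values instead of two, so any loop still unpacking (main_content, sources) pairs will raise ValueError: too many values to unpack after this commit. A minimal sketch of the new contract, using a stub generator in place of the real function (the stub's strings and result dict are made up):

def stub_get_response_with_search(query):
    # Stub with the same yield shape as the updated function:
    # (main_content, sources, search_results) triples.
    results = [{"title": "Example", "body": "...", "href": "https://example.com"}]
    yield "Partial draft", "", results
    yield "Final draft", "Sources:example.com", results

for main_content, sources, search_results in stub_get_response_with_search("test"):
    print(main_content, "|", sources, "|", len(search_results))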