Shreyas094 committed
Commit cdba008 · verified · Parent: cdaacc7

Update app.py

Files changed (1): app.py (+25 −1)
app.py CHANGED
@@ -482,7 +482,31 @@ This information is from {date}.
 You are an expert AI assistant. Write a detailed summary of the information provided in this source that is relevant to the following user request: '{query}'
 Base your summary strictly on the information from this source. Only include information that is directly supported by the given content.
 If any part of the information cannot be verified from this source, clearly state that it could not be confirmed."""
-
+
+        if model == "@cf/meta/llama-3.1-8b-instruct":
+            # Use Cloudflare API
+            source_response = ""
+            for response in get_response_from_cloudflare(prompt="", context=context, query=query, num_calls=1, temperature=temperature, search_type="web"):
+                source_response += response
+                accumulated_response += f"Source {i} ({source}):\n\n{source_response}\n\n"
+                yield accumulated_response, ""
+        else:
+            # Use Hugging Face API
+            client = InferenceClient(model, token=huggingface_token)
+
+            source_response = ""
+            for message in client.chat_completion(
+                messages=[{"role": "user", "content": prompt}],
+                max_tokens=2000,
+                temperature=temperature,
+                stream=True,
+            ):
+                if message.choices and message.choices[0].delta and message.choices[0].delta.content:
+                    chunk = message.choices[0].delta.content
+                    source_response += chunk
+                    accumulated_response += f"Source {i} ({source}):\n\n{source_response}\n\n"
+                    yield accumulated_response, ""
+
         # Generate an overall summary after processing all sources
         overall_prompt = f"""Based on the summaries you've generated for each source: '{accumulated_response}', provide a concise overall summary that addresses the user's query: '{query}'
 Highlight any conflicting information or gaps in the available data."""
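
The Cloudflare branch delegates streaming to a `get_response_from_cloudflare` generator whose body is not part of this commit. A minimal sketch of what such a generator could look like is below, built on Cloudflare's documented Workers AI REST endpoint (`POST /accounts/{account_id}/ai/run/{model}`, which returns server-sent events when `"stream": true` is set). The environment variable names, the system-prompt framing of `context`, and the handling of `prompt`, `num_calls`, and `search_type` are assumptions for illustration, not the app's actual implementation.

```python
import json
import os

import requests

# Hypothetical sketch of the get_response_from_cloudflare generator called in
# the diff; the app's real implementation is not shown in this commit.
CF_ACCOUNT_ID = os.environ["CLOUDFLARE_ACCOUNT_ID"]  # assumed env var name
CF_API_TOKEN = os.environ["CLOUDFLARE_API_TOKEN"]    # assumed env var name


def get_response_from_cloudflare(prompt, context, query, num_calls=1,
                                 temperature=0.7, search_type="web"):
    """Yield response chunks from Cloudflare Workers AI (llama-3.1-8b-instruct)."""
    url = (f"https://api.cloudflare.com/client/v4/accounts/"
           f"{CF_ACCOUNT_ID}/ai/run/@cf/meta/llama-3.1-8b-instruct")
    headers = {"Authorization": f"Bearer {CF_API_TOKEN}"}
    payload = {
        "messages": [
            # Assumed framing: pass the retrieved context as a system message.
            {"role": "system", "content": f"Use this context:\n{context}"},
            {"role": "user", "content": query},
        ],
        "temperature": temperature,
        "stream": True,  # ask the endpoint for server-sent events
    }
    for _ in range(num_calls):
        with requests.post(url, headers=headers, json=payload, stream=True) as r:
            r.raise_for_status()
            for line in r.iter_lines(decode_unicode=True):
                # SSE frames look like: data: {"response": "..."}
                if not line or not line.startswith("data: "):
                    continue
                data = line[len("data: "):]
                if data == "[DONE]":
                    break
                yield json.loads(data).get("response", "")
```

Consuming the SSE stream with `iter_lines` and a `data: ` prefix check keeps the sketch dependency-free beyond `requests`.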
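
The Hugging Face branch uses `huggingface_hub.InferenceClient.chat_completion` with `stream=True`, which yields chunks whose text arrives in `choices[0].delta.content`, exactly what the guard in the diff checks for. A self-contained sketch of that consumption pattern, with an illustrative model name and an assumed token variable:

```python
import os

from huggingface_hub import InferenceClient

# Minimal sketch of the streaming pattern used in the diff's Hugging Face branch.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.3",       # illustrative model choice
    token=os.environ.get("HUGGINGFACE_TOKEN"),  # assumed env var name
)

response_text = ""
for message in client.chat_completion(
    messages=[{"role": "user", "content": "Summarize why the sky is blue."}],
    max_tokens=200,
    temperature=0.7,
    stream=True,  # yield incremental delta chunks instead of one final reply
):
    # Each streamed chunk carries at most a few tokens in choices[0].delta.content.
    if message.choices and message.choices[0].delta.content:
        response_text += message.choices[0].delta.content

print(response_text)
```

Checking `delta.content` before appending matters because some streamed chunks, such as the final one carrying only a finish reason, have no text payload.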