Update app.py
app.py
CHANGED
@@ -121,16 +121,22 @@ def load_paper_summaries() -> Dict[str, str]:
         return {}
 
 
-def hf_inference(model_name, prompt, max_tokens=2000, retries=5):
+def hf_inference(model_name, prompt, max_tokens=2000, retries=5, stream=False):  # Added stream parameter
     for attempt in range(retries):
         try:
             messages = [{"role": "user", "content": prompt}]
-            response = client.chat.completions.create(
+            response_generator = client.chat.completions.create(
                 model=model_name,
                 messages=messages,
-                max_tokens=max_tokens
+                max_tokens=max_tokens,
+                stream=stream  # Pass the stream parameter
             )
-            return {"generated_text": response.choices[0].message.content}
+            if stream:
+                return response_generator  # Return the generator directly
+            else:
+                # Not streaming: create() returns the completed response object directly
+                response = response_generator
+                return {"generated_text": response.choices[0].message.content}
         except Exception as e:
             if attempt == retries - 1:
                 logger.error(f"Request failed after {retries} retries: {e}")
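Note that the new `stream` flag gives `hf_inference` two different return shapes: the raw chunk generator when `stream=True`, and a `{"generated_text": ...}` dict otherwise, so callers must branch on the mode. A minimal caller sketch, assuming `client` is an OpenAI-compatible client (for example `huggingface_hub.InferenceClient`) and that `MAIN_LLM_MODEL` is the constant defined elsewhere in app.py:

```python
# Sketch only: hf_inference and MAIN_LLM_MODEL come from app.py above.

# Non-streaming call: a plain dict, or {"error": ...} after exhausted retries.
result = hf_inference(MAIN_LLM_MODEL, "Summarize FAISS in one sentence.")
if "generated_text" in result:
    print(result["generated_text"])

# Streaming call: the raw chunk generator is returned, so the caller must
# accumulate deltas itself; chunks may arrive with empty content.
text = ""
for chunk in hf_inference(MAIN_LLM_MODEL, "Summarize FAISS in one sentence.", stream=True):
    if chunk.choices and chunk.choices[0].delta.content:
        text += chunk.choices[0].delta.content
print(text)
```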
@@ -138,34 +144,51 @@ def hf_inference(model_name, prompt, max_tokens=2000, retries=5):
             time.sleep(RETRY_DELAY * (1 + attempt))
     return {"error": "Request failed after multiple retries."}
 
-def ensemble_inference(prompt, models=ENSEMBLE_MODELS, max_tokens=1500):
+def ensemble_inference(prompt, models=ENSEMBLE_MODELS, max_tokens=1500, stream=False):  # Added stream
     results = []
-    with ThreadPoolExecutor(max_workers=len(models)) as executor:
-        future_to_model = {executor.submit(hf_inference, model, prompt, max_tokens): model for model in models}
-        for future in as_completed(future_to_model):
-            model = future_to_model[future]
-            try:
-                result = future.result()
-                if "generated_text" in result:
-                    results.append({"model": model, "text": result["generated_text"]})
-            except Exception as e:
-                logger.error(f"Error with model {model}: {e}")
-
-    if not results:
-        return {"error": "All models failed to generate responses"}
 
-    if len(results) == 1:
-        return {"generated_text": results[0]["text"]}
-
-    synthesis_prompt = "Synthesize these expert responses into a single coherent answer:\n\n"
-    for result in results:
-        synthesis_prompt += f"Expert {results.index(result) + 1} ({result['model'].split('/')[-1]}):\n{result['text']}\n\n"
-
-    synthesis = hf_inference(MAIN_LLM_MODEL, synthesis_prompt)
-    if "generated_text" in synthesis:
-        return synthesis
-    else:
-        return {"generated_text": max(results, key=lambda x: len(x["text"]))["text"]}
+    if stream:  # If streaming, return a generator that yields from each model
+        def generate_responses():
+            with ThreadPoolExecutor(max_workers=len(models)) as executor:
+                future_to_model = {executor.submit(hf_inference, model, prompt, max_tokens, stream=True): model for model in models}
+
+                for future in as_completed(future_to_model):
+                    model = future_to_model[future]
+                    try:
+                        for chunk in future.result():  # Iterate through chunks
+                            yield {"model": model, "text": chunk.choices[0].delta.content}  # Yield the content of the chunk
+                    except Exception as e:
+                        logger.error(f"Error with model {model}: {e}")
+                        yield {"model": model, "text": f"Error: {e}"}
+        return generate_responses()  # Return the generator
+
+    else:  # Non-streaming behavior
+        with ThreadPoolExecutor(max_workers=len(models)) as executor:
+            future_to_model = {executor.submit(hf_inference, model, prompt, max_tokens, stream=False): model for model in models}
+            for future in as_completed(future_to_model):
+                model = future_to_model[future]
+                try:
+                    result = future.result()
+                    if "generated_text" in result:
+                        results.append({"model": model, "text": result["generated_text"]})
+                except Exception as e:
+                    logger.error(f"Error with model {model}: {e}")
+
+    if not results:
+        return {"error": "All models failed to generate responses"}
+
+    if len(results) == 1:
+        return {"generated_text": results[0]["text"]}
+
+    synthesis_prompt = "Synthesize these expert responses into a single coherent answer:\n\n"
+    for result in results:
+        synthesis_prompt += f"Expert {results.index(result) + 1} ({result['model'].split('/')[-1]}):\n{result['text']}\n\n"
+
+    synthesis = hf_inference(MAIN_LLM_MODEL, synthesis_prompt)  # Use a consistent model for final synthesis
+    if "generated_text" in synthesis:
+        return synthesis
+    else:
+        return {"generated_text": max(results, key=lambda x: len(x["text"]))["text"]}  # Fallback
 
 def tool_search_web(query: str, num_results: int = NUM_RESULTS, safesearch: str = "moderate",
                     time_filter: Optional[str] = None, region: str = "wt-wt", language: str = "en-us") -> list:
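`ensemble_inference` now has the same dual contract, and the two modes differ in more than shape: the streaming path yields interleaved per-model deltas and never reaches the synthesis step, while the non-streaming path merges all answers through MAIN_LLM_MODEL. A usage sketch against the definitions above (ENSEMBLE_MODELS is the model list defined earlier in app.py):

```python
# Sketch only: ensemble_inference and ENSEMBLE_MODELS come from app.py above.

# Non-streaming: one synthesized answer (or a fallback/error dict).
answer = ensemble_inference("What is FAISS used for?")
print(answer.get("generated_text", answer.get("error")))

# Streaming: {"model", "text"} deltas, interleaved across models as chunks
# arrive; callers wanting one coherent answer should prefer the mode above.
for piece in ensemble_inference("What is FAISS used for?", stream=True):
    if piece["text"]:
        print(f"[{piece['model']}] {piece['text']}")
```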
@@ -788,7 +811,7 @@ Output:
     return prompt
 
 def deep_research(prompt):
-    task_description = "You are an advanced research assistant. …"
+    task_description = "You are an advanced research assistant, designed to be as comprehensive as possible. Use available tools iteratively, focus on different aspects, explore promising leads thoroughly, critically evaluate your findings, and build up a comprehensive understanding of the research topic. Utilize the FAISS index to avoid redundant searches and to build a persistent knowledge base."
     research_data = load_research_data()
     paper_summaries = load_paper_summaries()  # Load paper summaries
 
@@ -818,6 +841,7 @@ def deep_research(prompt):
     if key_entities:
         context.append(f"Identified key entities: {key_entities}")
         intermediate_output += f"Identified key entities for focused research: {key_entities_with_descriptions}\n"
+        yield "Identifying key entities... (Completed)"
 
     # Initialize progress tracking for each entity.
     entity_progress = {entity: {'queries': [], 'insights': []} for entity in key_entities}
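The yield statements added throughout this function change its nature: once a def contains yield anywhere, calling it returns a generator and runs no body code until iteration starts, which is why the final return also has to become a yield (see the last hunk). A self-contained sketch of the pattern the diff applies, with a hypothetical pipeline standing in for deep_research:

```python
# The progress-streaming pattern deep_research adopts: yield status lines
# during the run, then yield the finished report as the last value.
def pipeline(question: str):
    yield "Identifying key entities... (Completed)"
    # ...real work happens between status updates...
    yield "Drafting initial research plan... (Completed)"
    yield f"Final report for: {question}"  # replaces the old `return`

# Callers (Gradio included) just iterate; each yield repaints the output.
for update in pipeline("urban tree survival"):
    print(update)
```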
@@ -829,7 +853,9 @@ def deep_research(prompt):
 
     if not focus_areas:  # Corrected placement: outside the loop
         initial_focus_areas = tool_identify_focus_areas(prompt=prompt)
+        yield "Identifying initial focus areas... (Completed)"
         research_plan = tool_draft_research_plan(prompt=prompt, entities=key_entities, focus_areas=initial_focus_areas)
+        yield "Drafting initial research plan... (Completed)"
         context.append(f"Initial Research Plan: {research_plan[:200]}...")  # Add plan to context
         intermediate_output += f"Initial Research Plan:\n{research_plan}\n\n"
         focus_areas = initial_focus_areas
@@ -859,6 +885,7 @@ def deep_research(prompt):
 
         if i == 0:  # Initial broad search
             initial_query = tool_generate_search_query(prompt=prompt)
+            yield f"Generating initial search query... (Iteration {i+1})"
             if initial_query:
                 previous_queries.append(initial_query)
                 entity_progress['general']['queries'].append(initial_query)
@@ -875,12 +902,14 @@ def deep_research(prompt):
                 search_results = []
                 for future in as_completed(futures):
                     search_results.extend(future.result())
+                yield f"Performing initial searches... (Iteration {i+1})"
 
                 filtered_search_results = filter_results(search_results, prompt)
 
                 if filtered_search_results:
                     context.append(f"Initial Search Results: {len(filtered_search_results)} items found")
                     reasoning_output = tool_reason(prompt, filtered_search_results)
+                    yield f"Reasoning about initial search results... (Iteration {i+1})"
                     if reasoning_output:
                         all_insights.append(reasoning_output)
                         entity_progress['general']['insights'].append(reasoning_output)
@@ -897,6 +926,7 @@ def deep_research(prompt):
                 previous_queries=entity_progress[current_entity]['queries'],
                 focus_areas=focus_areas
             )
+            yield f"Generating search query for entity: {current_entity}... (Iteration {i+1})"
 
             if entity_query:
                 previous_queries.append(entity_query)
@@ -916,7 +946,7 @@ def deep_research(prompt):
                 for future in as_completed(futures):
                     search_results.extend(future.result())
 
-
+                yield f"Searching for information on entity: {current_entity}... (Iteration {i+1})"
                 filtered_search_results = filter_results(search_results,
                                                          f"{prompt} {current_entity}",
                                                          previous_snippets=seen_snippets)  # Pass existing snippets
@@ -930,6 +960,7 @@ def deep_research(prompt):
                     reasoning_context=entity_progress[current_entity]['insights'],  # Use entity-specific context
                     focus_areas=focus_areas
                 )
+                yield f"Reasoning about entity: {current_entity}... (Iteration {i+1})"
 
                 if entity_reasoning:
                     all_insights.append(entity_reasoning)
@@ -946,23 +977,32 @@ def deep_research(prompt):
             context.append(f"Entity query for {current_entity} yielded no relevant results")
 
         llm_prompt = create_prompt(task_description, prompt, tools, context)
-        llm_response = hf_inference(MAIN_LLM_MODEL, llm_prompt)
+        llm_response = hf_inference(MAIN_LLM_MODEL, llm_prompt, stream=True)  # Use streaming
 
         if isinstance(llm_response, dict) and "error" in llm_response:
             intermediate_output += f"LLM Error: {llm_response['error']}\n"
+            yield f"LLM Error (Iteration {i+1}): {llm_response['error']}"  # Display error in output
             continue
 
-        …
-        …
+        # Process streaming response
+        response_text = ""
+        try:
+            for chunk in llm_response:
+                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+                    response_text += chunk.choices[0].delta.content
+                    yield f"Iteration {i+1} - Thinking... {response_text}"  # Real-time output
+
+        except Exception as e:
+            intermediate_output += f"Streaming Error: {str(e)}\n"
+            yield f"Streaming Error (Iteration {i+1}): {str(e)}"
             continue
 
         try:
-            response_text = llm_response["generated_text"].strip()
             response_json = json.loads(response_text)  # Parse the JSON response.
             intermediate_output += f"Iteration {i+1} - Focus: {current_entity} - Action: {response_text}\n"
         except json.JSONDecodeError:
-            intermediate_output += f"Iteration {i+1} - LLM Response (Invalid JSON): {…
-            context.append(f"Invalid JSON: {…
+            intermediate_output += f"Iteration {i+1} - LLM Response (Invalid JSON): {response_text[:100]}...\n"
+            context.append(f"Invalid JSON: {response_text[:100]}...")  # Add invalid JSON to context
            continue
 
         tool_name = response_json.get("tool")
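The guards in the accumulation loop above matter: streamed chunks can arrive with an empty choices list or a delta without content (role headers and finish chunks), and JSON parsing is only attempted after the stream is fully drained, since a partial buffer would rarely parse. A self-contained mirror of that loop, with a fake stream standing in for the client response:

```python
import json
from types import SimpleNamespace

# Fake chunk stream: the final None models a chunk with no delta content.
def fake_stream():
    for piece in ('{"tool": ', '"search_web", ', '"parameters": {}}', None):
        delta = SimpleNamespace(content=piece)
        yield SimpleNamespace(choices=[SimpleNamespace(delta=delta)])

response_text = ""
for chunk in fake_stream():
    if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
        response_text += chunk.choices[0].delta.content

try:
    action = json.loads(response_text)  # {'tool': 'search_web', 'parameters': {}}
except json.JSONDecodeError:
    action = None  # deep_research logs the bad buffer and `continue`s instead
print(action)
```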
@@ -988,6 +1028,7 @@ def deep_research(prompt):
                 parameters['failed_queries'] = failed_queries
                 parameters['focus_areas'] = focus_areas
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Generated search query: {result}"
 
                 if current_entity != 'general':
                     entity_progress[current_entity]['queries'].append(result)  # Add entity-specific
@@ -1027,6 +1068,7 @@ def deep_research(prompt):
                 parameters['focus_areas'] = focus_areas
 
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Reasoning about information..."
 
                 if current_entity != 'general':
                     entity_progress[current_entity]['insights'].append(result)
@@ -1049,6 +1091,7 @@ def deep_research(prompt):
                     parameters['prompt'] = prompt
 
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Critiquing reasoning..."
                 previous_critiques.append(result)
                 context.append(f"Critique: {result[:200]}...")
             else:
@@ -1056,6 +1099,7 @@ def deep_research(prompt):
 
             elif tool_name == "identify_contradictions":
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Identifying contradictions..."
                 if result:
                     contradictions = result  # Keep track of contradictions.
                     context.append(f"Identified contradictions: {result}")
@@ -1064,6 +1108,7 @@ def deep_research(prompt):
                 if 'failed_areas' not in parameters:
                     parameters['failed_areas'] = failed_areas
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Identifying focus areas..."
                 if result:
                     old_focus = set(focus_areas)
                     focus_areas = result  # Update focus areas
@@ -1072,6 +1117,7 @@ def deep_research(prompt):
 
             elif tool_name == "extract_article":
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Extracting article content..."
                 if result:
                     context.append(f"Extracted article content from {parameters['url']}: {result[:200]}...")
                     # Reason specifically about the extracted article.
@@ -1082,6 +1128,7 @@ def deep_research(prompt):
 
             elif tool_name == "summarize_paper":
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Summarizing paper..."
                 if result:
                     paper_summaries[parameters['paper_text'][:100]] = result  # Store by a snippet of the text
                     save_paper_summaries(paper_summaries)
@@ -1095,6 +1142,7 @@ def deep_research(prompt):
                 if 'prompt' not in parameters:
                     parameters['prompt'] = prompt
                 result = tool["function"](**parameters)
+                yield f"Iteration {i+1} - Performing meta-analysis..."
                 if result:
                     all_insights.append(result)  # Add meta-analysis to overall insights.
                     context.append(f"Meta-analysis across entities: {result[:200]}...")
@@ -1182,7 +1230,8 @@ def deep_research(prompt):
     full_output += f"Total iterations: {i+1}\n"
     full_output += f"Total insights generated: {len(all_insights)}\n"
 
-    return full_output
+    yield full_output  # Final output
+
 
 custom_css = """
 .gradio-container {
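Swapping `return full_output` for `yield full_output` is the piece that lets Gradio stream: for a generator function, Gradio renders each yielded value as a progressive update and keeps the last one as the final result. A minimal self-contained sketch of that contract (hypothetical function and inputs, not from app.py):

```python
import time
import gradio as gr

# Each yield replaces the current output; the last yield is the final value.
def fake_research(q):
    for step in ("Searching...", "Reasoning...", "Writing report..."):
        time.sleep(0.5)
        yield f"{step} ({q})"
    yield f"# Report\nFindings for: {q}"

demo = gr.Interface(fn=fake_research, inputs="text", outputs=gr.Markdown())
demo.queue()   # generator outputs are delivered through the event queue
demo.launch()
```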
@@ -1192,6 +1241,7 @@ custom_css = """
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
    line-height: 1.5;
    font-size: 14px;
+   white-space: pre-wrap; /* Preserve newlines and spacing */
 }
 h3 {
    color: #2c3e50;
@@ -1210,11 +1260,11 @@ iface = gr.Interface(
     inputs=[
         gr.Textbox(lines=5, placeholder="Enter your research question...", label="Research Question")
     ],
-    outputs=gr.…
+    outputs=gr.Markdown(label="Research Results", elem_classes=["output-box"]),  # Changed to Markdown
     title="Advanced Multi-Stage Research Assistant",
     description="""This tool performs deep, multi-faceted research, leveraging multiple search engines,
     specialized academic databases, and advanced AI models. It incorporates a persistent knowledge
-    base using FAISS indexing to avoid redundant searches and build upon previous findings.""",
+    base using FAISS indexing to avoid redundant searches and build upon previous findings. Progress is shown in real-time.""",
     examples=[
         ["What are the key factors affecting urban tree survival and how do they vary between developing and developed countries?"],
         ["Compare and contrast the economic policies of China and the United States over the past two decades, analyzing their impacts on global trade."],
@@ -1226,6 +1276,7 @@ iface = gr.Interface(
     cache_examples=False,
     css=custom_css,
     allow_flagging="never",
+    live=True  # For real-time streaming
 )
 
 if __name__ == "__main__":
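One caution on the last change: `live=True` makes Gradio re-run the function on every input change, which can restart a long research run on each keystroke; the streaming behavior itself comes from the generator function plus Gradio's queue, not from this flag. A plausible launch block under that assumption (the diff cuts off after `if __name__ == "__main__":`, so app.py's actual body here is not shown):

```python
# Assumed launch block, not part of the visible diff.
if __name__ == "__main__":
    iface.queue()   # required so yielded updates stream to the browser
    iface.launch()
```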