Update app.py
app.py
CHANGED
@@ -121,22 +121,16 @@ def load_paper_summaries() -> Dict[str, str]:
         return {}
 
 
-def hf_inference(model_name, prompt, max_tokens=2000, retries=5, stream=False):
+def hf_inference(model_name, prompt, max_tokens=2000, retries=5):
     for attempt in range(retries):
         try:
             messages = [{"role": "user", "content": prompt}]
-            response_generator = client.chat.completions.create(
+            response = client.chat.completions.create(
                 model=model_name,
                 messages=messages,
-                max_tokens=max_tokens,
-                stream=stream  # Pass the stream parameter
+                max_tokens=max_tokens
             )
-            if stream:
-                return response_generator  # Return the generator directly
-            else:
-                # If not streaming, get the full response
-                response = next(response_generator)  # Consume the first chunk to get complete object
-                return {"generated_text": response.choices[0].message.content}
+            return {"generated_text": response.choices[0].message.content}
         except Exception as e:
             if attempt == retries - 1:
                 logger.error(f"Request failed after {retries} retries: {e}")
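With `stream` gone, `hf_inference` always returns a plain dict. A minimal usage sketch of the new contract (the model id here is a placeholder, not one of the app's configured constants):

```python
# Hypothetical caller: hf_inference (above) now returns
# {"generated_text": ...} on success or {"error": ...} after all retries fail.
result = hf_inference("some-org/some-model", "Summarize FAISS indexing in one sentence.")
if "error" in result:
    logger.error(result["error"])    # logger is app.py's module-level logger
else:
    print(result["generated_text"])
```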
@@ -144,51 +138,34 @@ def hf_inference(model_name, prompt, max_tokens=2000, retries=5, stream=False):
             time.sleep(RETRY_DELAY * (1 + attempt))
     return {"error": "Request failed after multiple retries."}
 
-def ensemble_inference(prompt, models=ENSEMBLE_MODELS, max_tokens=1500, stream=False):
+def ensemble_inference(prompt, models=ENSEMBLE_MODELS, max_tokens=1500):
     results = []
 
-    if stream:
-        ...  # (streaming branch, 14 lines, not recovered in this view)
-    else:  # Non-streaming behavior
-        with ThreadPoolExecutor(max_workers=len(models)) as executor:
-            future_to_model = {executor.submit(hf_inference, model, prompt, max_tokens, stream=False): model for model in models}
-            for future in as_completed(future_to_model):
-                model = future_to_model[future]
-                try:
-                    result = future.result()
-                    if "generated_text" in result:
-                        results.append({"model": model, "text": result["generated_text"]})
-                except Exception as e:
-                    logger.error(f"Error with model {model}: {e}")
-
-        if not results:
-            return {"error": "All models failed to generate responses"}
-
-        if len(results) == 1:
-            return {"generated_text": results[0]["text"]}
-
-        synthesis_prompt = "Synthesize these expert responses into a single coherent answer:\n\n"
-        for result in results:
-            synthesis_prompt += f"Expert {results.index(result) + 1} ({result['model'].split('/')[-1]}):\n{result['text']}\n\n"
-
-        synthesis = hf_inference(MAIN_LLM_MODEL, synthesis_prompt)  # Use a consistent model for final synthesis
-        if "generated_text" in synthesis:
-            return synthesis
-        else:
-            return {"generated_text": max(results, key=lambda x: len(x["text"]))["text"]}  # Fallback
+    with ThreadPoolExecutor(max_workers=len(models)) as executor:
+        future_to_model = {executor.submit(hf_inference, model, prompt, max_tokens): model for model in models}
+        for future in as_completed(future_to_model):
+            model = future_to_model[future]
+            try:
+                result = future.result()
+                if "generated_text" in result:
+                    results.append({"model": model, "text": result["generated_text"]})
+            except Exception as e:
+                logger.error(f"Error with model {model}: {e}")
+
+    if not results:
+        return {"error": "All models failed to generate responses"}
+
+    if len(results) == 1:
+        return {"generated_text": results[0]["text"]}
+
+    synthesis_prompt = "Synthesize these expert responses into a single coherent answer:\n\n"
+    for result in results:
+        synthesis_prompt += f"Expert {results.index(result) + 1} ({result['model'].split('/')[-1]}):\n{result['text']}\n\n"
+
+    synthesis = hf_inference(MAIN_LLM_MODEL, synthesis_prompt)  # Use a consistent model for final synthesis
+    if "generated_text" in synthesis:
+        return synthesis
+    else:
+        return {"generated_text": max(results, key=lambda x: len(x["text"]))["text"]}  # Fallback
 
 def tool_search_web(query: str, num_results: int = NUM_RESULTS, safesearch: str = "moderate",
                     time_filter: Optional[str] = None, region: str = "wt-wt", language: str = "en-us") -> list:
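The simplified `ensemble_inference` is now a plain scatter-gather: fan the prompt out to every model in `ENSEMBLE_MODELS` in parallel, keep whatever succeeds, and let `MAIN_LLM_MODEL` synthesize the survivors (falling back to the longest answer). A hedged usage sketch:

```python
# Hypothetical call; ENSEMBLE_MODELS and MAIN_LLM_MODEL are app.py config constants.
answer = ensemble_inference("What are the main drivers of urban tree mortality?")
if "generated_text" in answer:
    print(answer["generated_text"])  # synthesized answer, or the single/longest response
else:
    print(answer["error"])           # every ensemble model failed
```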
@@ -672,7 +649,7 @@ tools = {
         "max_results": {"type": "integer", "description": "Maximum number of articles to return."}
         },
     },
-    "
+    "search_scholar": {
         "function": tool_search_scholar,
         "description": "Searches Google Scholar for academic publications.",
         "parameters": {
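Each `tools` entry pairs a callable with a JSON-schema-style description, so the action the LLM picks can be dispatched generically. A minimal sketch of the lookup-and-call pattern `deep_research` uses below (the unknown-tool branch is illustrative, not taken from this diff):

```python
# Generic dispatch over the tools registry, as deep_research does further down:
tool_name = response_json.get("tool")             # e.g. "search_scholar"
parameters = response_json.get("parameters", {})  # assumed key; schema set by create_prompt
tool = tools.get(tool_name)
if tool is None:
    context.append(f"Unknown tool requested: {tool_name}")  # illustrative fallback
else:
    result = tool["function"](**parameters)       # same call shape as in the hunks below
```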
@@ -811,7 +788,7 @@ Output:
     return prompt
 
 def deep_research(prompt):
-    task_description = "You are an advanced research assistant
+    task_description = "You are an advanced research assistant. Use available tools iteratively, focus on different aspects, follow promising leads, critically evaluate your findings, and build up a comprehensive understanding. Utilize the FAISS index to avoid redundant searches and build a persistent knowledge base."
    research_data = load_research_data()
    paper_summaries = load_paper_summaries()  # Load paper summaries
 
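The new `task_description` tells the model to lean on a FAISS index as a persistent knowledge base. The index wiring itself sits outside this diff; a minimal sketch of the redundant-search check it alludes to, assuming a hypothetical `embed()` helper that returns a normalized float32 vector:

```python
import faiss          # assumed dependency of the Space
import numpy as np

DIM = 384                        # embedding width; depends on the embedder actually used
index = faiss.IndexFlatIP(DIM)   # inner product ~ cosine similarity on normalized vectors

def is_redundant(snippet: str, threshold: float = 0.9) -> bool:
    """Skip a search result if a near-duplicate is already indexed; otherwise store it."""
    vec = np.asarray(embed(snippet), dtype="float32").reshape(1, DIM)  # embed() is hypothetical
    if index.ntotal > 0:
        scores, _ = index.search(vec, 1)   # similarity to the closest stored snippet
        if scores[0][0] >= threshold:
            return True
    index.add(vec)
    return False
```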
@@ -841,7 +818,6 @@ def deep_research(prompt):
     if key_entities:
         context.append(f"Identified key entities: {key_entities}")
         intermediate_output += f"Identified key entities for focused research: {key_entities_with_descriptions}\n"
-        yield "Identifying key entities... (Completed)"
 
     # Initialize progress tracking for each entity.
     entity_progress = {entity: {'queries': [], 'insights': []} for entity in key_entities}
@@ -853,9 +829,7 @@ def deep_research(prompt):
 
     if not focus_areas:  # Corrected placement: outside the loop
         initial_focus_areas = tool_identify_focus_areas(prompt=prompt)
-        yield "Identifying initial focus areas...(Completed)"
         research_plan = tool_draft_research_plan(prompt=prompt, entities=key_entities, focus_areas=initial_focus_areas)
-        yield "Drafting initial research plan...(Completed)"
         context.append(f"Initial Research Plan: {research_plan[:200]}...")  # Add plan to context
         intermediate_output += f"Initial Research Plan:\n{research_plan}\n\n"
         focus_areas = initial_focus_areas
@@ -885,7 +859,6 @@ def deep_research(prompt):
 
         if i == 0:  # Initial broad search
             initial_query = tool_generate_search_query(prompt=prompt)
-            yield f"Generating initial search query... (Iteration {i+1})"
             if initial_query:
                 previous_queries.append(initial_query)
                 entity_progress['general']['queries'].append(initial_query)
@@ -902,14 +875,12 @@ def deep_research(prompt):
                 search_results = []
                 for future in as_completed(futures):
                     search_results.extend(future.result())
-                yield f"Performing initial searches... (Iteration {i+1})"
 
                 filtered_search_results = filter_results(search_results, prompt)
 
                 if filtered_search_results:
                     context.append(f"Initial Search Results: {len(filtered_search_results)} items found")
                     reasoning_output = tool_reason(prompt, filtered_search_results)
-                    yield f"Reasoning about initial search results... (Iteration {i+1})"
                     if reasoning_output:
                         all_insights.append(reasoning_output)
                         entity_progress['general']['insights'].append(reasoning_output)
@@ -926,7 +897,6 @@ def deep_research(prompt):
                 previous_queries=entity_progress[current_entity]['queries'],
                 focus_areas=focus_areas
             )
-            yield f"Generating search query for entity: {current_entity}... (Iteration {i+1})"
 
             if entity_query:
                 previous_queries.append(entity_query)
@@ -946,7 +916,7 @@ def deep_research(prompt):
                     for future in as_completed(futures):
                         search_results.extend(future.result())
 
-
+
                 filtered_search_results = filter_results(search_results,
                                                          f"{prompt} {current_entity}",
                                                          previous_snippets=seen_snippets)  # Pass existing snippets
@@ -960,7 +930,6 @@ def deep_research(prompt):
                     reasoning_context=entity_progress[current_entity]['insights'],  # Use entity-specific context
                     focus_areas=focus_areas
                 )
-                yield f"Reasoning about entity: {current_entity}... (Iteration {i+1})"
 
                 if entity_reasoning:
                     all_insights.append(entity_reasoning)
@@ -977,32 +946,23 @@ def deep_research(prompt):
                 context.append(f"Entity query for {current_entity} yielded no relevant results")
 
         llm_prompt = create_prompt(task_description, prompt, tools, context)
-        llm_response = hf_inference(MAIN_LLM_MODEL, llm_prompt, stream=True)
+        llm_response = hf_inference(MAIN_LLM_MODEL, llm_prompt)
 
         if isinstance(llm_response, dict) and "error" in llm_response:
             intermediate_output += f"LLM Error: {llm_response['error']}\n"
-            yield f"LLM Error (Iteration {i+1}): {llm_response['error']}"  # Display error in output
             continue
 
-        response_text = ""
-
-        try:
-            for chunk in llm_response:
-                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
-                    response_text += chunk.choices[0].delta.content
-                    yield f"Iteration {i+1} - Thinking... {response_text}"  # Real time output
-
-        except Exception as e:
-            intermediate_output += f"Streaming Error: {str(e)}\n"
-            yield f"Streaming Error (Iteration {i+1}): {str(e)}"  # Error
+        if not isinstance(llm_response, dict) or "generated_text" not in llm_response:
+            intermediate_output += "Error: Invalid LLM response.\n"
             continue
 
         try:
+            response_text = llm_response["generated_text"].strip()
             response_json = json.loads(response_text)  # Parse the JSON response.
             intermediate_output += f"Iteration {i+1} - Focus: {current_entity} - Action: {response_text}\n"
         except json.JSONDecodeError:
-            intermediate_output += f"Iteration {i+1} - LLM Response (Invalid JSON): {response_text[:100]}...\n"
-            context.append(f"Invalid JSON: {response_text[:100]}...")
+            intermediate_output += f"Iteration {i+1} - LLM Response (Invalid JSON): {llm_response['generated_text'][:100]}...\n"
+            context.append(f"Invalid JSON: {llm_response['generated_text'][:100]}...")  # Add invalid JSON to context
             continue
 
         tool_name = response_json.get("tool")
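The parse step assumes the model answers with a single JSON object naming a tool and its parameters. An illustrative action of the shape `json.loads(response_text)` expects (the exact schema is defined by `create_prompt`; the tool name here is inferred from `tool_generate_search_query` and is an assumption):

```python
import json

# Illustrative only: one plausible action object the LLM might emit.
example = '{"tool": "generate_search_query", "parameters": {"prompt": "urban tree survival"}}'
action = json.loads(example)
assert action.get("tool") == "generate_search_query"
```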
@@ -1028,7 +988,6 @@ def deep_research(prompt):
                     parameters['failed_queries'] = failed_queries
                     parameters['focus_areas'] = focus_areas
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Generated search query: {result}"
 
                 if current_entity != 'general':
                     entity_progress[current_entity]['queries'].append(result)  # Add entity-specific
@@ -1068,7 +1027,6 @@ def deep_research(prompt):
                     parameters['focus_areas'] = focus_areas
 
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Reasoning about information..."
 
                 if current_entity != 'general':
                     entity_progress[current_entity]['insights'].append(result)
@@ -1091,7 +1049,6 @@ def deep_research(prompt):
                     parameters['prompt'] = prompt
 
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Critiquing reasoning..."
                 previous_critiques.append(result)
                 context.append(f"Critique: {result[:200]}...")
             else:
@@ -1099,7 +1056,6 @@ def deep_research(prompt):
 
             elif tool_name == "identify_contradictions":
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Identifying contradictions..."
                 if result:
                     contradictions = result  # Keep track of contradictions.
                     context.append(f"Identified contradictions: {result}")
@@ -1108,7 +1064,6 @@ def deep_research(prompt):
                 if 'failed_areas' not in parameters:
                     parameters['failed_areas'] = failed_areas
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Identifying focus areas..."
                 if result:
                     old_focus = set(focus_areas)
                     focus_areas = result  # Update focus areas
@@ -1117,7 +1072,6 @@ def deep_research(prompt):
 
             elif tool_name == "extract_article":
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Extracting article content..."
                 if result:
                     context.append(f"Extracted article content from {parameters['url']}: {result[:200]}...")
                     # Reason specifically about the extracted article.
@@ -1128,7 +1082,6 @@ def deep_research(prompt):
 
             elif tool_name == "summarize_paper":
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Summarizing paper..."
                 if result:
                     paper_summaries[parameters['paper_text'][:100]] = result  # Store by a snippet of the text
                     save_paper_summaries(paper_summaries)
@@ -1142,7 +1095,6 @@ def deep_research(prompt):
                 if 'prompt' not in parameters:
                     parameters['prompt'] = prompt
                 result = tool["function"](**parameters)
-                yield f"Iteration {i+1} - Performing meta-analysis..."
                 if result:
                     all_insights.append(result)  # Add meta-analysis to overall insights.
                     context.append(f"Meta-analysis across entities: {result[:200]}...")
@@ -1230,24 +1182,25 @@ def deep_research(prompt):
     full_output += f"Total iterations: {i+1}\n"
     full_output += f"Total insights generated: {len(all_insights)}\n"
 
-    ...  # (2 removed lines not recovered in this view)
+    return full_output
 
 custom_css = """
 .gradio-container {
+    background-color: #f7f9fc;
 }
 .output-box {
     font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
     line-height: 1.5;
     font-size: 14px;
-    white-space: pre-wrap; /* Preserve newlines and spacing */
 }
 h3 {
+    color: #2c3e50;
     font-weight: 600;
 }
 .footer {
     text-align: center;
     margin-top: 20px;
+    color: #7f8c8d;
     font-size: 0.9em;
 }
 """
@@ -1257,11 +1210,11 @@ iface = gr.Interface(
     inputs=[
         gr.Textbox(lines=5, placeholder="Enter your research question...", label="Research Question")
     ],
-    outputs=gr.
+    outputs=gr.Textbox(lines=30, placeholder="Research results will appear here...", label="Research Results", elem_classes=["output-box"]),
     title="Advanced Multi-Stage Research Assistant",
     description="""This tool performs deep, multi-faceted research, leveraging multiple search engines,
     specialized academic databases, and advanced AI models. It incorporates a persistent knowledge
-    base using FAISS indexing to avoid redundant searches and build upon previous findings.
+    base using FAISS indexing to avoid redundant searches and build upon previous findings.""",
     examples=[
         ["What are the key factors affecting urban tree survival and how do they vary between developing and developed countries?"],
         ["Compare and contrast the economic policies of China and the United States over the past two decades, analyzing their impacts on global trade."],
@@ -1273,7 +1226,6 @@ iface = gr.Interface(
     cache_examples=False,
     css=custom_css,
     allow_flagging="never",
-    live=True  # for real time streaming
 )
 
 if __name__ == "__main__":
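The `__main__` guard presumably just launches the interface; a one-line sketch (launch flags such as `share` or `server_port` are assumptions, not shown in this diff):

```python
iface.launch()  # assumed entry point; the actual call sits just below the visible hunk
```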