Update app.py
app.py CHANGED
@@ -34,8 +34,8 @@ class ReasoningOrchestra:
 
     def format_text_to_html(self, text: str) -> str:
         """Convert text to HTML with proper formatting"""
-        if not text:
-            return "<p>No content
+        if not text or text.strip() == "" or text == "No response generated":
+            return "<p style='color: #666; font-style: italic;'>No content was generated. This might be due to API limitations or model availability issues.</p>"
 
         # Escape HTML characters first
         text = html.escape(text)
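The guard added here runs before any escaping or markup generation. A minimal standalone sketch of the behavior, with a simplified body and shortened placeholder text (the real method goes on to build lists and paragraphs):

import html

def format_text_to_html(text: str) -> str:
    # Treat None, whitespace-only, and the sentinel string as "no content"
    if not text or text.strip() == "" or text == "No response generated":
        return "<p style='color: #666; font-style: italic;'>No content was generated.</p>"
    # Escape HTML characters before wrapping; simplified from the real method
    return f"<p>{html.escape(text)}</p>"

assert "No content" in format_text_to_html("")
assert "No content" in format_text_to_html("   ")
assert format_text_to_html("<b>") == "<p>&lt;b&gt;</p>"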
@@ -75,7 +75,7 @@ class ReasoningOrchestra:
                formatted_lines.append(f'<li>{stripped[3:]}</li>')
            else:
                if in_list:
-                    formatted_lines.append('</ul>' if
+                    formatted_lines.append('</ul>' if any('<li>' in line for line in formatted_lines[-5:]) else '</ol>')
                    in_list = False
                if stripped:
                    formatted_lines.append(f'<p>{line}</p>')
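The new close logic scans the last few formatted lines for '<li>' to choose between '</ul>' and '</ol>'; since both list styles emit plain '<li>' items, that scan cannot tell them apart. A sketch of an alternative that records which tag was opened (hypothetical helper, handling '- ' bullets only):

def close_aware_lines(lines):
    formatted, open_tag = [], None  # open_tag is "ul" or "ol" while a list is open
    for line in lines:
        stripped = line.strip()
        if stripped.startswith("- "):
            if open_tag is None:
                open_tag = "ul"
                formatted.append("<ul>")
            formatted.append(f"<li>{stripped[2:]}</li>")
        else:
            if open_tag is not None:
                formatted.append(f"</{open_tag}>")  # close exactly what was opened
                open_tag = None
            if stripped:
                formatted.append(f"<p>{line}</p>")
    if open_tag is not None:
        formatted.append(f"</{open_tag}>")
    return formatted

print(close_aware_lines(["- one", "- two", "done"]))
# ['<ul>', '<li>one</li>', '<li>two</li>', '</ul>', '<p>done</p>']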
@@ -104,12 +104,14 @@ Please provide a comprehensive analysis with deep reasoning. Think through all i
                model="deepseek-r1-distill-llama-70b",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
-                max_completion_tokens=
+                max_completion_tokens=4096,
                top_p=0.95,
                reasoning_format="raw"
            )
 
-            response_content = completion.choices[0].message.content
+            response_content = completion.choices[0].message.content
+            if not response_content or response_content.strip() == "":
+                response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
 
            return {
                "model": "DeepSeek R1 (Deep Thinker)",
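The same empty-completion guard is repeated after each model call in this commit. A hedged sketch of factoring it into one helper (content_or_placeholder is a hypothetical name; the attribute path completion.choices[0].message.content is taken from the diff):

from typing import Optional

def content_or_placeholder(content: Optional[str], placeholder: str) -> str:
    """Return content unless it is None or whitespace-only."""
    if not content or content.strip() == "":
        return placeholder
    return content

# Wiring into the diff's call shape:
# response_content = content_or_placeholder(
#     completion.choices[0].message.content,
#     "The model did not generate a response.",
# )
assert content_or_placeholder(None, "fallback") == "fallback"
assert content_or_placeholder("  ", "fallback") == "fallback"
assert content_or_placeholder("ok", "fallback") == "ok"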
@@ -146,11 +148,12 @@ Be decisive and solution-focused. Provide concrete, actionable recommendations."
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                top_p=0.95,
-                max_completion_tokens=
-                reasoning_effort="default"
+                max_completion_tokens=4096
            )
 
-            response_content = completion.choices[0].message.content
+            response_content = completion.choices[0].message.content
+            if not response_content or response_content.strip() == "":
+                response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
 
            return {
                "model": "Qwen3 32B (Quick Strategist)",
@@ -183,16 +186,29 @@ Please conduct a thorough investigation including:
 Be extremely thorough and leave no stone unturned. Provide detailed evidence and reasoning for your conclusions."""
 
        try:
+            # Try with different parameters for QwQ model
            completion = self.client.chat.completions.create(
                model="qwen-qwq-32b",
                messages=[{"role": "user", "content": prompt}],
-                temperature=0.
-                top_p=0.
-                max_completion_tokens=
-                reasoning_format="parsed"
+                temperature=0.7,
+                top_p=0.9,
+                max_completion_tokens=4096
            )
 
-            response_content = completion.choices[0].message.content
+            response_content = completion.choices[0].message.content
+            if not response_content or response_content.strip() == "":
+                # Fallback: try with a simpler prompt
+                fallback_prompt = f"Analyze this problem in detail: {problem}"
+                fallback_completion = self.client.chat.completions.create(
+                    model="qwen-qwq-32b",
+                    messages=[{"role": "user", "content": fallback_prompt}],
+                    temperature=0.5,
+                    max_completion_tokens=4096
+                )
+                response_content = fallback_completion.choices[0].message.content
+
+            if not response_content or response_content.strip() == "":
+                response_content = "The QwQ model encountered an issue generating content. This could be due to the complexity of the prompt, content filtering, or temporary model availability issues. The model may work better with simpler, more direct questions."
 
            return {
                "model": "QwQ 32B (Detail Detective)",
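This hunk gives the QwQ call a three-step ladder: the full prompt, then a simpler fallback prompt, then a fixed placeholder. A generic sketch of that pattern with the API call abstracted behind a callable (all names hypothetical):

from typing import Callable, Optional

def ask_with_fallback(ask: Callable[[str], Optional[str]],
                      prompt: str,
                      fallback_prompt: str,
                      placeholder: str) -> str:
    # Try the detailed prompt, then the simpler one, then give up with a message
    for p in (prompt, fallback_prompt):
        content = ask(p)
        if content and content.strip():
            return content
    return placeholder

# Demo with a stub that only answers short prompts:
stub = lambda p: "answer" if len(p) < 20 else None
assert ask_with_fallback(stub, "a long, very detailed prompt", "short?", "no output") == "answer"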
@@ -202,19 +218,31 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
                "tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A"
            }
        except Exception as e:
-
+            # If QwQ fails, provide a helpful error message
+            error_msg = f"Detail Detective error: {str(e)}"
+            if "model" in str(e).lower() or "not found" in str(e).lower():
+                error_msg += "\n\nNote: The QwQ model may not be available in your region or may have usage restrictions. You can still use the other models in the orchestra."
+            return {"error": error_msg}
 
    def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str:
        """Synthesize all three perspectives into a final orchestrated solution"""
        if not self.is_api_key_set:
            return "API key not set"
 
-        # Extract reasoning content safely
-
-
-
+        # Extract reasoning content safely with better error handling
+        def extract_reasoning(result: Dict, model_name: str) -> str:
+            if result.get('error'):
+                return f"**{model_name} encountered an issue:** {result['error']}"
+            reasoning = result.get('reasoning', '')
+            if not reasoning or reasoning.strip() == "" or reasoning == "No response generated":
+                return f"**{model_name}** did not provide analysis (this may be due to model limitations or API issues)."
+            return reasoning
+
+        deep_reasoning = extract_reasoning(deep_result, "Deep Thinker")
+        strategic_reasoning = extract_reasoning(strategic_result, "Quick Strategist")
+        detective_reasoning = extract_reasoning(detective_result, "Detail Detective")
 
-        synthesis_prompt = f"""You are the Orchestra Conductor. You have received three different
+        synthesis_prompt = f"""You are the Orchestra Conductor. You have received analytical perspectives from three different AI reasoning specialists on the same problem. Your job is to synthesize these into a comprehensive, unified solution.
 
 ORIGINAL PROBLEM: {original_problem}
 
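extract_reasoning normalizes all three result dicts before the synthesis prompt is assembled, so one failed model no longer breaks the conductor step. A usage sketch, with the function body copied from the hunk above and made-up result dicts:

from typing import Dict

def extract_reasoning(result: Dict, model_name: str) -> str:
    if result.get('error'):
        return f"**{model_name} encountered an issue:** {result['error']}"
    reasoning = result.get('reasoning', '')
    if not reasoning or reasoning.strip() == "" or reasoning == "No response generated":
        return f"**{model_name}** did not provide analysis (this may be due to model limitations or API issues)."
    return reasoning

# Each failure mode collapses to a readable line instead of crashing the synthesis:
print(extract_reasoning({"error": "rate limited"}, "Detail Detective"))
print(extract_reasoning({"reasoning": ""}, "Quick Strategist"))
print(extract_reasoning({"reasoning": "Market entry looks viable."}, "Deep Thinker"))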
@@ -228,24 +256,31 @@ DETECTIVE INVESTIGATION:
 {detective_reasoning}
 
 Please create a unified synthesis that:
-1. Combines the best insights from all
-2.
-3.
-4.
-5.
+1. Combines the best insights from all available analyses
+2. Addresses any gaps where models didn't provide input
+3. Resolves any contradictions between the analyses
+4. Provides a comprehensive final recommendation
+5. Highlights where the different reasoning styles complement each other
+6. Gives a clear, actionable conclusion
 
-
+If some models didn't provide analysis, work with what's available and note any limitations.
+
+Format your response as a well-structured final solution that leverages all available reasoning approaches."""
 
        try:
            completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": synthesis_prompt}],
                temperature=0.7,
-                max_completion_tokens=
+                max_completion_tokens=4096,
                top_p=0.9
            )
 
-
+            synthesis_content = completion.choices[0].message.content
+            if not synthesis_content or synthesis_content.strip() == "":
+                return "The synthesis could not be generated. This may be due to API limitations or the complexity of combining the different analyses."
+
+            return synthesis_content
        except Exception as e:
            return f"Synthesis error: {str(e)}"
 
@@ -272,12 +307,6 @@ def run_single_model(problem: str, model_choice: str, context: str = "") -> str:
 
    start_time = time.time()
 
-    # Show loading state
-    loading_html = f"""<div style="padding: 20px; border: 2px solid #007bff; border-radius: 10px; background-color: #e6f3ff;">
-    <h3>🔄 Processing...</h3>
-    <p>The {model_choice} is analyzing your problem. Please wait...</p>
-    </div>"""
-
    if model_choice == "Deep Thinker (DeepSeek R1)":
        result = orchestra.deep_thinker_analyze(problem, context)
    elif model_choice == "Quick Strategist (Qwen3 32B)":
@@ -355,8 +384,9 @@ def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str,
    def format_result_html(result: Dict, color: str, icon: str) -> str:
        if "error" in result:
            return f"""<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
-            <h3>❌ Error</h3>
+            <h3>❌ Model Error</h3>
            <p>{result['error']}</p>
+            <p style="font-size: 12px; color: #666; margin-top: 10px;"><em>This model may have restrictions or temporary availability issues. The other models can still provide analysis.</em></p>
            </div>"""
 
        reasoning_html = orchestra.format_text_to_html(result['reasoning'])
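The error card is assembled inline per result. A minimal sketch of the same card as a standalone builder (error_card_html is a hypothetical name; the styles and copy are from the hunk above):

def error_card_html(message: str) -> str:
    # Inline-styled red panel matching the diff's error branch
    return (
        '<div style="color: red; padding: 20px; border: 2px solid red; '
        'border-radius: 10px; background-color: #ffe6e6;">'
        '<h3>❌ Model Error</h3>'
        f'<p>{message}</p>'
        '<p style="font-size: 12px; color: #666; margin-top: 10px;">'
        '<em>This model may have restrictions or temporary availability issues. '
        'The other models can still provide analysis.</em></p>'
        '</div>'
    )

print(error_card_html("Detail Detective error: model unavailable"))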
@@ -451,7 +481,7 @@ with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
    <div class="orchestra-header">
        <h1>🎼 The Collaborative Reasoning Orchestra</h1>
        <p><em>Where AI models collaborate like musicians in an orchestra to solve complex problems</em></p>
-        <p><strong>Now with Beautiful HTML-Formatted Responses!</strong></p>
+        <p><strong>Now with Enhanced Error Handling & Beautiful HTML-Formatted Responses!</strong></p>
    </div>
    """)
 
@@ -519,7 +549,7 @@ with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
 
        # Full Orchestra Tab
        with gr.TabItem("🎼 Full Orchestra Collaboration"):
-            gr.Markdown("### Run all three models collaboratively with stunning HTML-formatted output")
+            gr.Markdown("### Run all three models collaboratively with enhanced error handling and stunning HTML-formatted output")
 
            with gr.Column():
                with gr.Row():
@@ -575,13 +605,6 @@ with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
 **🚗 Transportation Future:**
 "Design a comprehensive transportation system for a smart city of 1 million people in 2035, integrating autonomous vehicles, public transit, and sustainable mobility."
 """)
-
-            # Quick copy buttons for examples
-            with gr.Row():
-                gr.Button("📋 Copy Business Example", variant="secondary").click(
-                    lambda: "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption. Budget is $50K for the next 6 months.",
-                    outputs=[]
-                )
 
    # Footer
    gr.HTML("""