Create app.py
app.py
ADDED
@@ -0,0 +1,465 @@
import gradio as gr
import asyncio
from groq import Groq
import json
import time
from typing import Dict, List, Tuple, Optional
import threading
from datetime import datetime

class ReasoningOrchestra:
    def __init__(self):
        self.client = None
        self.is_api_key_set = False

    def set_api_key(self, api_key: str) -> str:
        """Set the Groq API key and test connection"""
        if not api_key.strip():
            return "❌ Please enter a valid API key"

        try:
            self.client = Groq(api_key=api_key.strip())
            # Test the connection with a simple request
            test_completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": "Hello"}],
                max_completion_tokens=10
            )
            self.is_api_key_set = True
            return "✅ API key validated successfully! You can now use the Reasoning Orchestra."
        except Exception as e:
            self.is_api_key_set = False
            return f"❌ API key validation failed: {str(e)}"

    def deep_thinker_analyze(self, problem: str, context: str = "") -> Dict:
        """DeepSeek R1 - The Deep Thinker"""
        if not self.is_api_key_set:
            return {"error": "API key not set"}

        prompt = f"""You are the Deep Thinker in a collaborative reasoning system. Your role is to provide thorough, methodical analysis with extensive step-by-step reasoning.

Problem: {problem}
{f"Additional Context: {context}" if context else ""}

Please provide a comprehensive analysis with deep reasoning. Think through all implications, consider multiple angles, and provide detailed step-by-step logic."""

        try:
            completion = self.client.chat.completions.create(
                model="deepseek-r1-distill-llama-70b",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                max_completion_tokens=2048,
                top_p=0.95,
                reasoning_format="raw"
            )

            response_content = completion.choices[0].message.content

            return {
                "model": "DeepSeek R1 (Deep Thinker)",
                "role": "💭 The Philosopher & Deep Analyzer",
                "reasoning": response_content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": completion.usage.total_tokens if hasattr(completion, 'usage') else "N/A"
            }
        except Exception as e:
            return {"error": f"Deep Thinker error: {str(e)}"}

    def quick_strategist_analyze(self, problem: str, context: str = "") -> Dict:
        """Qwen3 32B - The Quick Strategist"""
        if not self.is_api_key_set:
            return {"error": "API key not set"}

        prompt = f"""You are the Quick Strategist in a collaborative reasoning system. Your role is to provide fast, efficient strategic analysis with clear action plans.

Problem: {problem}
{f"Additional Context: {context}" if context else ""}

Please provide a strategic analysis with:
1. Key insights and patterns
2. Practical solutions
3. Implementation priorities
4. Risk assessment
5. Clear next steps

Be decisive and solution-focused."""

        try:
            completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                top_p=0.95,
                max_completion_tokens=1536,
                reasoning_effort="default"
            )

            return {
                "model": "Qwen3 32B (Quick Strategist)",
                "role": "🚀 The Strategic Decision Maker",
                "reasoning": completion.choices[0].message.content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": completion.usage.total_tokens if hasattr(completion, 'usage') else "N/A"
            }
        except Exception as e:
            return {"error": f"Quick Strategist error: {str(e)}"}

    def detail_detective_analyze(self, problem: str, context: str = "") -> Dict:
        """QwQ 32B - The Detail Detective"""
        if not self.is_api_key_set:
            return {"error": "API key not set"}

        prompt = f"""You are the Detail Detective in a collaborative reasoning system. Your role is to provide meticulous investigation and comprehensive fact-checking.

Problem: {problem}
{f"Additional Context: {context}" if context else ""}

Please conduct a thorough investigation including:
1. Detailed analysis of all aspects
2. Potential edge cases and considerations
3. Verification of assumptions
4. Historical context or precedents
5. Comprehensive pros and cons
6. Hidden connections or implications

Be extremely thorough and leave no stone unturned."""

        try:
            completion = self.client.chat.completions.create(
                model="qwen-qwq-32b",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.6,
                top_p=0.95,
                max_completion_tokens=2048,
                reasoning_format="parsed"
            )

            return {
                "model": "QwQ 32B (Detail Detective)",
                "role": "🔍 The Meticulous Investigator",
                "reasoning": completion.choices[0].message.content,
                "timestamp": datetime.now().strftime("%H:%M:%S"),
                "tokens_used": completion.usage.total_tokens if hasattr(completion, 'usage') else "N/A"
            }
        except Exception as e:
            return {"error": f"Detail Detective error: {str(e)}"}

    def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str:
        """Synthesize all three perspectives into a final orchestrated solution"""
        if not self.is_api_key_set:
            return "API key not set"

        synthesis_prompt = f"""You are the Orchestra Conductor. You have received three different analytical perspectives on the same problem. Your job is to synthesize these into a comprehensive, unified solution.

ORIGINAL PROBLEM: {original_problem}

DEEP THINKER ANALYSIS:
{deep_result.get('reasoning', 'No analysis available')}

STRATEGIC ANALYSIS:
{strategic_result.get('reasoning', 'No analysis available')}

DETECTIVE INVESTIGATION:
{detective_result.get('reasoning', 'No analysis available')}

Please create a unified synthesis that:
1. Combines the best insights from all three perspectives
2. Resolves any contradictions between the analyses
3. Provides a comprehensive final recommendation
4. Highlights where the different reasoning styles complement each other
5. Gives a clear, actionable conclusion

Format your response as a well-structured final solution that leverages all three reasoning approaches."""

        try:
            completion = self.client.chat.completions.create(
                model="qwen/qwen3-32b",
                messages=[{"role": "user", "content": synthesis_prompt}],
                temperature=0.7,
                max_completion_tokens=2048,
                top_p=0.9
            )

            return completion.choices[0].message.content
        except Exception as e:
            return f"Synthesis error: {str(e)}"

# Initialize the orchestra
orchestra = ReasoningOrchestra()

def validate_api_key(api_key: str) -> str:
    """Validate the API key and return status"""
    return orchestra.set_api_key(api_key)

def run_single_model(problem: str, model_choice: str, context: str = "") -> Tuple[str, str]:
    """Run a single model analysis"""
    if not orchestra.is_api_key_set:
        return "❌ Please set your API key first", ""

    if not problem.strip():
        return "❌ Please enter a problem to analyze", ""

    start_time = time.time()

    if model_choice == "Deep Thinker (DeepSeek R1)":
        result = orchestra.deep_thinker_analyze(problem, context)
    elif model_choice == "Quick Strategist (Qwen3 32B)":
        result = orchestra.quick_strategist_analyze(problem, context)
    elif model_choice == "Detail Detective (QwQ 32B)":
        result = orchestra.detail_detective_analyze(problem, context)
    else:
        return "❌ Invalid model selection", ""

    elapsed_time = time.time() - start_time

    if "error" in result:
        return f"❌ {result['error']}", ""

    formatted_output = f"""## {result['role']} - {result['model']}
**Analysis Time:** {elapsed_time:.2f} seconds | **Timestamp:** {result['timestamp']} | **Tokens:** {result['tokens_used']}

---

{result['reasoning']}
"""

    return formatted_output, ""

def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str, str]:
    """Run the full collaborative reasoning orchestra"""
    if not orchestra.is_api_key_set:
        error_msg = "❌ Please set your API key first"
        return error_msg, error_msg, error_msg, error_msg

    if not problem.strip():
        error_msg = "❌ Please enter a problem to analyze"
        return error_msg, error_msg, error_msg, error_msg

    status_updates = []

    # Phase 1: Deep Thinker
    status_updates.append("💭 Deep Thinker is analyzing the problem...")
    deep_result = orchestra.deep_thinker_analyze(problem, context)

    # Phase 2: Quick Strategist
    status_updates.append("🚀 Quick Strategist is developing strategies...")
    strategic_result = orchestra.quick_strategist_analyze(problem, context)

    # Phase 3: Detail Detective
    status_updates.append("🔍 Detail Detective is investigating thoroughly...")
    detective_result = orchestra.detail_detective_analyze(problem, context)

    # Phase 4: Synthesis
    status_updates.append("🎼 Orchestra Conductor is synthesizing all perspectives...")
    synthesis = orchestra.synthesize_orchestra(deep_result, strategic_result, detective_result, problem)

    # Format outputs
    def format_result(result: Dict) -> str:
        if "error" in result:
            return f"❌ {result['error']}"

        return f"""## {result['role']} - {result['model']}
**Timestamp:** {result['timestamp']} | **Tokens:** {result['tokens_used']}

---

{result['reasoning']}
"""

    deep_output = format_result(deep_result)
    strategic_output = format_result(strategic_result)
    detective_output = format_result(detective_result)

    synthesis_output = f"""## 🎼 Orchestra Conductor - Final Synthesis

---

{synthesis}
"""

    return deep_output, strategic_output, detective_output, synthesis_output

# Custom CSS for better styling
custom_css = """
.gradio-container {
    max-width: 1200px !important;
}
.api-key-section {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 20px;
    border-radius: 10px;
    margin-bottom: 20px;
}
.model-section {
    border: 2px solid #e1e5e9;
    border-radius: 10px;
    padding: 15px;
    margin: 10px 0;
}
.orchestra-header {
    text-align: center;
    background: linear-gradient(45deg, #f093fb 0%, #f5576c 100%);
    padding: 20px;
    border-radius: 15px;
    margin-bottom: 20px;
}
.status-box {
    background-color: #f8f9fa;
    border-left: 4px solid #28a745;
    padding: 15px;
    margin: 10px 0;
    border-radius: 5px;
}
"""

# Build the Gradio interface
with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
    # Header
    gr.HTML("""
    <div class="orchestra-header">
        <h1>🎼 The Collaborative Reasoning Orchestra</h1>
        <p><em>Where AI models collaborate like musicians in an orchestra to solve complex problems</em></p>
    </div>
    """)

    # API Key Section
    with gr.Group():
        gr.HTML('<div class="api-key-section"><h3 style="color: white; margin-top: 0;">🔑 API Configuration</h3></div>')
        with gr.Row():
            api_key_input = gr.Textbox(
                label="Enter your Groq API Key",
                type="password",
                placeholder="gsk_...",
                info="Get your free API key from https://console.groq.com/keys"
            )
            api_status = gr.Textbox(
                label="API Status",
                interactive=False,
                placeholder="Enter API key to validate..."
            )

        validate_btn = gr.Button("🔑 Validate API Key", variant="primary")
        validate_btn.click(
            fn=validate_api_key,
            inputs=[api_key_input],
            outputs=[api_status]
        )

    # Main Interface Tabs
    with gr.Tabs() as tabs:

        # Single Model Tab
        with gr.TabItem("🎯 Single Model Analysis"):
            gr.Markdown("### Test individual reasoning models")

            with gr.Row():
                with gr.Column():
                    single_problem = gr.Textbox(
                        label="Problem Statement",
                        placeholder="Enter the problem you want to analyze...",
                        lines=4
                    )
                    single_context = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Any additional context or constraints...",
                        lines=2
                    )
                    model_choice = gr.Dropdown(
                        label="Choose Model",
                        choices=[
                            "Deep Thinker (DeepSeek R1)",
                            "Quick Strategist (Qwen3 32B)",
                            "Detail Detective (QwQ 32B)"
                        ],
                        value="Deep Thinker (DeepSeek R1)"
                    )
                    single_analyze_btn = gr.Button("🚀 Analyze", variant="primary")

                with gr.Column():
                    single_output = gr.Markdown(label="Analysis Result")

            single_analyze_btn.click(
                fn=run_single_model,
                inputs=[single_problem, model_choice, single_context],
                outputs=[single_output, gr.Textbox(visible=False)]
            )

        # Full Orchestra Tab
        with gr.TabItem("🎼 Full Orchestra Collaboration"):
            gr.Markdown("### Run all three models collaboratively for comprehensive analysis")

            with gr.Row():
                with gr.Column(scale=1):
                    orchestra_problem = gr.Textbox(
                        label="Problem Statement",
                        placeholder="Enter a complex problem that benefits from multiple reasoning perspectives...",
                        lines=6
                    )
                    orchestra_context = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Background information, constraints, or specific requirements...",
                        lines=3
                    )
                    orchestra_analyze_btn = gr.Button("🎼 Start Orchestra Analysis", variant="primary", size="lg")

                with gr.Column(scale=2):
                    gr.Markdown("### 💭 Deep Thinker Analysis")
                    deep_output = gr.Markdown()

                    gr.Markdown("### 🚀 Quick Strategist Analysis")
                    strategic_output = gr.Markdown()

                    gr.Markdown("### 🔍 Detail Detective Analysis")
                    detective_output = gr.Markdown()

                    gr.Markdown("### 🎼 Final Orchestrated Solution")
                    synthesis_output = gr.Markdown()

            orchestra_analyze_btn.click(
                fn=run_full_orchestra,
                inputs=[orchestra_problem, orchestra_context],
                outputs=[deep_output, strategic_output, detective_output, synthesis_output]
            )

        # Examples Tab
        with gr.TabItem("💡 Example Problems"):
            gr.Markdown("""
            ### Try these example problems to see the Orchestra in action:

            **🏢 Business Strategy:**
            "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption."

            **🤖 Ethical AI:**
            "Should autonomous vehicles prioritize passenger safety over pedestrian safety in unavoidable accident scenarios? Consider the ethical, legal, and practical implications."

            **🌍 Environmental Policy:**
            "Design a policy framework to reduce carbon emissions in urban areas while maintaining economic growth and social equity."

            **🧬 Scientific Research:**
            "We've discovered a potential breakthrough in gene therapy, but it requires human trials. How should we proceed given the risks, benefits, and regulatory requirements?"

            **🎓 Educational Innovation:**
            "How can we redesign traditional university education to better prepare students for the rapidly changing job market of the 2030s?"

            **🏙️ Urban Planning:**
            "A city wants to build affordable housing but faces opposition from current residents, environmental concerns, and budget constraints. Develop a comprehensive solution."
            """)

    # Footer
    gr.HTML("""
    <div style="text-align: center; margin-top: 30px; padding: 20px; background-color: #f8f9fa; border-radius: 10px;">
        <h4>🎼 How the Orchestra Works</h4>
        <p><strong>Deep Thinker (DeepSeek R1):</strong> Provides thorough philosophical and theoretical analysis</p>
        <p><strong>Quick Strategist (Qwen3 32B):</strong> Delivers practical strategies and action plans</p>
        <p><strong>Detail Detective (QwQ 32B):</strong> Conducts comprehensive investigation and fact-checking</p>
        <p><strong>Orchestra Conductor:</strong> Synthesizes all perspectives into a unified solution</p>
        <br>
        <p><em>Built with ❤️ using Groq's lightning-fast inference and Gradio</em></p>
    </div>
    """)

# Launch the app
if __name__ == "__main__":
    app.launch(
        share=True
    )
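
Note that this commit adds only app.py. For the Space to build, or to run the file locally, the two third-party imports have to be installed; a minimal sketch of a requirements.txt, assuming the standard PyPI package names for Gradio and the Groq SDK:

gradio
groq

Locally, running "pip install gradio groq" and then "python app.py" should start the interface; the Groq API key is entered in the UI rather than read from an environment variable, and with share=True Gradio also prints a temporary public link alongside the local URL.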