shukdevdatta123 commited on
Commit
0fc2023
Β·
verified Β·
1 Parent(s): 25bf549

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +195 -179
app.py CHANGED
@@ -22,9 +22,9 @@ class ReasoningOrchestra:
22
  self.client = Groq(api_key=api_key.strip())
23
  # Test the connection with a simple request
24
  test_completion = self.client.chat.completions.create(
25
- model="llama3-8b-8192", # Using a smaller model for a quick validation
26
  messages=[{"role": "user", "content": "Hello"}],
27
- max_tokens=10
28
  )
29
  self.is_api_key_set = True
30
  return "βœ… API key validated successfully! You can now use the Reasoning Orchestra."
@@ -37,7 +37,7 @@ class ReasoningOrchestra:
37
  if not text or text.strip() == "" or text == "No response generated":
38
  return "<p style='color: #666; font-style: italic;'>No content was generated. This might be due to API limitations or model availability issues.</p>"
39
 
40
- # Escape HTML characters first to prevent injection
41
  text = html.escape(text)
42
 
43
  # Convert markdown-style formatting to HTML
@@ -56,53 +56,39 @@ class ReasoningOrchestra:
56
  text = re.sub(r'```(.*?)```', r'<pre><code>\1</code></pre>', text, flags=re.DOTALL)
57
  text = re.sub(r'`(.*?)`', r'<code>\1</code>', text)
58
 
59
- # Lists (improved logic)
60
  lines = text.split('\n')
61
- in_ul = False
62
- in_ol = False
63
- html_lines = []
64
-
65
  for line in lines:
66
  stripped = line.strip()
67
- is_ul_item = stripped.startswith('- ') or stripped.startswith('* ')
68
- is_ol_item = re.match(r'^\d+\.\s', stripped)
69
-
70
- # Close lists if the pattern changes
71
- if in_ul and not is_ul_item:
72
- html_lines.append('</ul>')
73
- in_ul = False
74
- if in_ol and not is_ol_item:
75
- html_lines.append('</ol>')
76
- in_ol = False
77
-
78
- # Process list items
79
- if is_ul_item:
80
- if not in_ul:
81
- html_lines.append('<ul>')
82
- in_ul = True
83
- html_lines.append(f'<li>{stripped[2:]}</li>')
84
- elif is_ol_item:
85
- if not in_ol:
86
- html_lines.append('<ol>')
87
- in_ol = True
88
- html_lines.append(f'<li>{re.sub(r"^\d+\.\\s", "", stripped)}</li>')
89
  else:
90
- # Regular paragraph or empty line
 
 
91
  if stripped:
92
- html_lines.append(f'<p>{line}</p>')
93
  else:
94
- html_lines.append('<br>')
95
 
96
- # Close any open lists at the end
97
- if in_ul:
98
- html_lines.append('</ul>')
99
- if in_ol:
100
- html_lines.append('</ol>')
101
-
102
- return '\n'.join(html_lines)
103
 
104
  def deep_thinker_analyze(self, problem: str, context: str = "") -> Dict:
105
- """DeepSeek Coder V2 - The Deep Thinker"""
106
  if not self.is_api_key_set:
107
  return {"error": "API key not set"}
108
 
@@ -113,11 +99,12 @@ Please provide a comprehensive analysis with deep reasoning. Think through all i
113
 
114
  try:
115
  completion = self.client.chat.completions.create(
116
- model="deepseek-coder-v2-lite-instruct",
117
  messages=[{"role": "user", "content": prompt}],
118
  temperature=0.6,
119
- max_tokens=8192,
120
  top_p=0.95,
 
121
  )
122
 
123
  response_content = completion.choices[0].message.content
@@ -125,7 +112,7 @@ Please provide a comprehensive analysis with deep reasoning. Think through all i
125
  response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
126
 
127
  return {
128
- "model": "DeepSeek Coder V2 (Deep Thinker)",
129
  "role": "🎭 The Philosopher & Deep Analyzer",
130
  "reasoning": response_content,
131
  "timestamp": datetime.now().strftime("%H:%M:%S"),
@@ -135,7 +122,7 @@ Please provide a comprehensive analysis with deep reasoning. Think through all i
135
  return {"error": f"Deep Thinker error: {str(e)}"}
136
 
137
  def quick_strategist_analyze(self, problem: str, context: str = "") -> Dict:
138
- """Llama3 70B - The Quick Strategist"""
139
  if not self.is_api_key_set:
140
  return {"error": "API key not set"}
141
 
@@ -152,11 +139,11 @@ Be decisive and solution-focused. Provide concrete, actionable recommendations."
152
 
153
  try:
154
  completion = self.client.chat.completions.create(
155
- model="llama3-70b-8192",
156
  messages=[{"role": "user", "content": prompt}],
157
  temperature=0.6,
158
  top_p=0.95,
159
- max_tokens=8192
160
  )
161
 
162
  response_content = completion.choices[0].message.content
@@ -164,7 +151,7 @@ Be decisive and solution-focused. Provide concrete, actionable recommendations."
164
  response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
165
 
166
  return {
167
- "model": "Llama3 70B (Quick Strategist)",
168
  "role": "πŸš€ The Strategic Decision Maker",
169
  "reasoning": response_content,
170
  "timestamp": datetime.now().strftime("%H:%M:%S"),
@@ -174,7 +161,7 @@ Be decisive and solution-focused. Provide concrete, actionable recommendations."
174
  return {"error": f"Quick Strategist error: {str(e)}"}
175
 
176
  def detail_detective_analyze(self, problem: str, context: str = "") -> Dict:
177
- """Mixtral 8x7B - The Detail Detective"""
178
  if not self.is_api_key_set:
179
  return {"error": "API key not set"}
180
 
@@ -191,33 +178,50 @@ Please conduct a thorough investigation including:
191
  Be extremely thorough and leave no stone unturned. Provide detailed evidence and reasoning for your conclusions."""
192
 
193
  try:
 
194
  completion = self.client.chat.completions.create(
195
- model="mixtral-8x7b-32768",
196
  messages=[{"role": "user", "content": prompt}],
197
  temperature=0.7,
198
  top_p=0.9,
199
- max_tokens=8192
200
  )
201
 
202
  response_content = completion.choices[0].message.content
203
  if not response_content or response_content.strip() == "":
204
- response_content = "The model did not generate a response. This might be due to content filtering or other API issues."
205
-
 
 
 
 
 
 
 
 
 
 
 
206
  return {
207
- "model": "Mixtral 8x7B (Detail Detective)",
208
  "role": "πŸ” The Meticulous Investigator",
209
  "reasoning": response_content,
210
  "timestamp": datetime.now().strftime("%H:%M:%S"),
211
  "tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A"
212
  }
213
  except Exception as e:
214
- return {"error": f"Detail Detective error: {str(e)}"}
 
 
 
 
215
 
216
  def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str:
217
- """Synthesize all three perspectives into a final orchestrated solution using Llama3 70B"""
218
  if not self.is_api_key_set:
219
  return "API key not set"
220
 
 
221
  def extract_reasoning(result: Dict, model_name: str) -> str:
222
  if result.get('error'):
223
  return f"**{model_name} encountered an issue:** {result['error']}"
@@ -230,30 +234,30 @@ Be extremely thorough and leave no stone unturned. Provide detailed evidence and
230
  strategic_reasoning = extract_reasoning(strategic_result, "Quick Strategist")
231
  detective_reasoning = extract_reasoning(detective_result, "Detail Detective")
232
 
233
- synthesis_prompt = f"""You are the Orchestra Conductor using Llama3 70B. You have received analytical perspectives from three different AI reasoning specialists on the same problem. Your job is to synthesize these into a comprehensive, unified solution.
234
  ORIGINAL PROBLEM: {original_problem}
235
- DEEP THINKER ANALYSIS (🎭 DeepSeek Coder V2):
236
  {deep_reasoning}
237
- STRATEGIC ANALYSIS (πŸš€ Llama3 70B):
238
  {strategic_reasoning}
239
- DETECTIVE INVESTIGATION (πŸ” Mixtral 8x7B):
240
  {detective_reasoning}
241
  As the Orchestra Conductor, please create a unified synthesis that:
242
- 1. Combines the best insights from all available analyses.
243
- 2. Addresses any gaps where models didn't provide input.
244
- 3. Resolves any contradictions between the analyses.
245
- 4. Provides a comprehensive final recommendation.
246
- 5. Highlights where the different reasoning styles complement each other.
247
- 6. Gives a clear, actionable conclusion.
248
  If some models didn't provide analysis, work with what's available and note any limitations.
249
  Format your response as a well-structured final solution that leverages all available reasoning approaches. Use clear sections and bullet points where appropriate for maximum clarity."""
250
 
251
  try:
252
  completion = self.client.chat.completions.create(
253
- model="llama3-70b-8192",
254
  messages=[{"role": "user", "content": synthesis_prompt}],
255
  temperature=0.7,
256
- max_tokens=8192,
257
  top_p=0.9
258
  )
259
 
@@ -288,11 +292,11 @@ def run_single_model(problem: str, model_choice: str, context: str = "") -> str:
288
 
289
  start_time = time.time()
290
 
291
- if model_choice == "Deep Thinker (DeepSeek Coder V2)":
292
  result = orchestra.deep_thinker_analyze(problem, context)
293
- elif model_choice == "Quick Strategist (Llama3 70B)":
294
  result = orchestra.quick_strategist_analyze(problem, context)
295
- elif model_choice == "Detail Detective (Mixtral 8x7B)":
296
  result = orchestra.detail_detective_analyze(problem, context)
297
  else:
298
  return """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
@@ -308,6 +312,7 @@ def run_single_model(problem: str, model_choice: str, context: str = "") -> str:
308
  <p>{result['error']}</p>
309
  </div>"""
310
 
 
311
  reasoning_html = orchestra.format_text_to_html(result['reasoning'])
312
 
313
  formatted_output = f"""
@@ -317,7 +322,7 @@ def run_single_model(problem: str, model_choice: str, context: str = "") -> str:
317
  </div>
318
 
319
  <div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;">
320
- <div style="display: flex; flex-wrap: wrap; gap: 20px; font-size: 14px; color: #666;">
321
  <span><strong>Model:</strong> {result['model']}</span>
322
  <span><strong>Analysis Time:</strong> {elapsed_time:.2f} seconds</span>
323
  <span><strong>Timestamp:</strong> {result['timestamp']}</span>
@@ -325,7 +330,7 @@ def run_single_model(problem: str, model_choice: str, context: str = "") -> str:
325
  </div>
326
  </div>
327
 
328
- <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6; font-size: 16px;">
329
  {reasoning_html}
330
  </div>
331
  </div>
@@ -349,11 +354,16 @@ def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str,
349
  </div>"""
350
  return error_msg, error_msg, error_msg, error_msg
351
 
352
- # NOTE: These calls are synchronous. For a more advanced version,
353
- # you could use threading to run them concurrently.
354
  deep_result = orchestra.deep_thinker_analyze(problem, context)
 
 
355
  strategic_result = orchestra.quick_strategist_analyze(problem, context)
 
 
356
  detective_result = orchestra.detail_detective_analyze(problem, context)
 
 
357
  synthesis = orchestra.synthesize_orchestra(deep_result, strategic_result, detective_result, problem)
358
 
359
  def format_result_html(result: Dict, color: str, icon: str) -> str:
@@ -374,13 +384,13 @@ def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str,
374
  </div>
375
 
376
  <div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;">
377
- <div style="display: flex; flex-wrap: wrap; gap: 20px; font-size: 14px; color: #666;">
378
  <span><strong>Timestamp:</strong> {result['timestamp']}</span>
379
  <span><strong>Tokens:</strong> {result['tokens_used']}</span>
380
  </div>
381
  </div>
382
 
383
- <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6; font-size: 16px;">
384
  {reasoning_html}
385
  </div>
386
  </div>
@@ -395,10 +405,10 @@ def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str,
395
  <div style="border: 2px solid #dc3545; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #fff5f5 0%, #fee);">
396
  <div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid #dc3545;">
397
  <span style="font-size: 24px; margin-right: 10px;">🎼</span>
398
- <h2 style="margin: 0; color: #dc3545;">Orchestra Conductor - Final Synthesis (Llama3 70B)</h2>
399
  </div>
400
 
401
- <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6; font-size: 16px;">
402
  {synthesis_html}
403
  </div>
404
  </div>
@@ -408,7 +418,6 @@ def run_full_orchestra(problem: str, context: str = "") -> Tuple[str, str, str,
408
 
409
  # Custom CSS for better styling
410
  custom_css = """
411
- body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
412
  .gradio-container {
413
  max-width: 1400px !important;
414
  margin: 0 auto !important;
@@ -419,29 +428,35 @@ body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
419
  border-radius: 10px;
420
  margin-bottom: 20px;
421
  }
 
 
 
 
 
 
422
  .orchestra-header {
423
  text-align: center;
424
  background: linear-gradient(45deg, #f093fb 0%, #f5576c 100%);
425
  padding: 20px;
426
  border-radius: 15px;
427
  margin-bottom: 20px;
428
- color: white;
429
  }
430
- /* Custom styling for HTML output containers */
 
 
 
 
 
 
 
431
  .html-content {
432
- max-height: 600px; /* Set a max height for scrollability */
433
- overflow-y: auto; /* Enable vertical scrolling */
434
  border: 1px solid #ddd;
435
  border-radius: 8px;
436
- padding: 15px;
437
- background-color: #fdfdfd;
438
  }
439
- .html-content h1, .html-content h2, .html-content h3 { color: #333; }
440
- .html-content p { margin-bottom: 1em; }
441
- .html-content ul, .html-content ol { padding-left: 25px; }
442
- .html-content li { margin-bottom: 0.5em; }
443
- .html-content pre { background-color: #f4f4f4; padding: 10px; border-radius: 5px; white-space: pre-wrap; word-wrap: break-word; }
444
- .html-content code { font-family: 'Courier New', Courier, monospace; background-color: #eee; padding: 2px 4px; border-radius: 3px;}
445
  """
446
 
447
  # Build the Gradio interface
@@ -450,91 +465,104 @@ with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
450
  gr.HTML("""
451
  <div class="orchestra-header">
452
  <h1>🎼 The Collaborative Reasoning Orchestra</h1>
453
- <p><em>Where AI models collaborate like musicians to solve complex problems</em></p>
454
- <p><strong>Powered by Groq's LPUβ„’ Inference Engine for real-time results</strong></p>
455
  </div>
456
  """)
457
 
458
  # API Key Section
459
- with gr.Accordion("πŸ”‘ API Configuration", open=True):
460
- with gr.Group():
461
- gr.HTML('<div class="api-key-section"><h3 style="color: white; margin-top: 0;">Enter Your Groq API Key</h3></div>')
462
- with gr.Row():
463
- api_key_input = gr.Textbox(
464
- label="Groq API Key",
465
- type="password",
466
- placeholder="gsk_...",
467
- info="Get your free API key from https://console.groq.com/keys"
468
- )
469
- api_status = gr.Textbox(
470
- label="API Status",
471
- interactive=False,
472
- placeholder="Enter API key and click validate..."
473
- )
474
-
475
- validate_btn = gr.Button("πŸ” Validate API Key", variant="primary")
 
 
 
 
476
 
477
  # Main Interface Tabs
478
  with gr.Tabs() as tabs:
479
 
480
  # Single Model Tab
481
  with gr.TabItem("🎯 Single Model Analysis"):
482
- gr.Markdown("### Test individual reasoning models with formatted HTML output.")
483
 
484
- with gr.Row(gap=16):
485
  with gr.Column(scale=1):
486
  single_problem = gr.Textbox(
487
  label="Problem Statement",
488
  placeholder="Enter the problem you want to analyze...",
489
- lines=5
490
  )
491
  single_context = gr.Textbox(
492
  label="Additional Context (Optional)",
493
  placeholder="Any additional context or constraints...",
494
- lines=3
495
  )
496
  model_choice = gr.Dropdown(
497
  label="Choose Model",
498
  choices=[
499
- "Deep Thinker (DeepSeek Coder V2)",
500
- "Quick Strategist (Llama3 70B)",
501
- "Detail Detective (Mixtral 8x7B)"
502
  ],
503
- value="Quick Strategist (Llama3 70B)"
504
  )
505
- single_analyze_btn = gr.Button("πŸš€ Analyze Problem", variant="primary")
506
 
507
  with gr.Column(scale=2):
508
  single_output = gr.HTML(label="Analysis Result", elem_classes=["html-content"])
 
 
 
 
 
 
509
 
510
  # Full Orchestra Tab
511
  with gr.TabItem("🎼 Full Orchestra Collaboration"):
512
- gr.Markdown("### Run all models collaboratively with a Llama3 70B conductor to synthesize a final solution.")
513
 
514
- with gr.Row(gap=16):
515
- with gr.Column(scale=1):
516
- orchestra_problem = gr.Textbox(
517
- label="Complex Problem Statement",
518
- placeholder="Enter a problem that benefits from multiple reasoning perspectives...",
519
- lines=8
520
- )
521
- orchestra_context = gr.Textbox(
522
- label="Additional Context (Optional)",
523
- placeholder="Background information, constraints, etc...",
524
- lines=4
525
- )
526
- orchestra_analyze_btn = gr.Button("🎼 Start Orchestra Analysis", variant="primary")
 
527
 
528
- with gr.Column(scale=2):
529
- with gr.Tabs():
530
- with gr.TabItem("🎼 Final Synthesis"):
531
- synthesis_output = gr.HTML(label="Final Orchestrated Solution (Llama3 70B)", elem_classes=["html-content"])
532
- with gr.TabItem("🎭 Deep Thinker"):
533
- deep_output = gr.HTML(label="Deep Thinker Analysis", elem_classes=["html-content"])
534
- with gr.TabItem("πŸš€ Quick Strategist"):
535
- strategic_output = gr.HTML(label="Quick Strategist Analysis", elem_classes=["html-content"])
536
- with gr.TabItem("πŸ” Detail Detective"):
537
- detective_output = gr.HTML(label="Detail Detective Analysis", elem_classes=["html-content"])
 
 
538
 
539
  # Examples Tab
540
  with gr.TabItem("πŸ’‘ Example Problems"):
@@ -544,63 +572,51 @@ with gr.Blocks(css=custom_css, title="Reasoning Orchestra") as app:
544
  **🏒 Business Strategy:**
545
  "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption. Budget is $50K for the next 6 months."
546
 
547
- **πŸ€– Ethical AI:** "Should autonomous vehicles prioritize passenger safety over pedestrian safety in unavoidable accident scenarios? Consider the ethical, legal, and practical implications for mass adoption."
 
548
 
549
  **🌍 Environmental Policy:**
550
  "Design a policy framework to reduce carbon emissions in urban areas by 40% within 10 years while maintaining economic growth and social equity."
551
 
 
 
 
552
  **πŸŽ“ Educational Innovation:**
553
  "How can we redesign traditional university education to better prepare students for the rapidly changing job market of the 2030s, considering AI, remote work, and emerging technologies?"
554
 
555
  **🏠 Urban Planning:**
556
  "A city of 500K people wants to build 10,000 affordable housing units but faces opposition from current residents, environmental concerns, and a $2B budget constraint. Develop a comprehensive solution."
 
 
 
557
  """)
558
 
559
  # Footer
560
  gr.HTML("""
561
- <div style="text-align: center; margin-top: 30px; padding: 20px; background: #f8f9fa; border-radius: 15px; border: 1px solid #dee2e6;">
562
  <h3>🎼 How the Orchestra Works</h3>
563
- <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 20px; margin: 20px 0; text-align: left;">
564
- <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
565
- <h4>🎭 Deep Thinker</h4>
566
- <p>Provides thorough philosophical and theoretical analysis with comprehensive reasoning chains.</p>
567
  </div>
568
- <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
569
- <h4>πŸš€ Quick Strategist</h4>
570
- <p>Delivers practical strategies, action plans, and rapid decision-making frameworks.</p>
571
  </div>
572
- <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
573
- <h4>πŸ” Detail Detective</h4>
574
- <p>Conducts comprehensive investigation, fact-checking, and finds hidden connections.</p>
575
  </div>
576
- <div style="background: #fff; padding: 15px; border-radius: 10px; border: 1px solid #eee;">
577
  <h4>🎼 Orchestra Conductor</h4>
578
- <p>Synthesizes all perspectives into a unified, comprehensive final solution.</p>
579
  </div>
580
  </div>
581
- <p style="margin-top: 20px; color: #6c757d;"><em>Built with ❀️ using Groq, Gradio, and Python</em></p>
582
  </div>
583
  """)
584
 
585
- # --- Event Handlers ---
586
- validate_btn.click(
587
- fn=validate_api_key,
588
- inputs=[api_key_input],
589
- outputs=[api_status]
590
- )
591
-
592
- single_analyze_btn.click(
593
- fn=run_single_model,
594
- inputs=[single_problem, model_choice, single_context],
595
- outputs=[single_output]
596
- )
597
-
598
- orchestra_analyze_btn.click(
599
- fn=run_full_orchestra,
600
- inputs=[orchestra_problem, orchestra_context],
601
- outputs=[deep_output, strategic_output, detective_output, synthesis_output]
602
- )
603
-
604
  # Launch the app
605
  if __name__ == "__main__":
606
- app.launch(share=False) # Set share=True to create a temporary public link
 
22
  self.client = Groq(api_key=api_key.strip())
23
  # Test the connection with a simple request
24
  test_completion = self.client.chat.completions.create(
25
+ model="qwen/qwen3-32b",
26
  messages=[{"role": "user", "content": "Hello"}],
27
+ max_completion_tokens=10
28
  )
29
  self.is_api_key_set = True
30
  return "βœ… API key validated successfully! You can now use the Reasoning Orchestra."
 
37
  if not text or text.strip() == "" or text == "No response generated":
38
  return "<p style='color: #666; font-style: italic;'>No content was generated. This might be due to API limitations or model availability issues.</p>"
39
 
40
+ # Escape HTML characters first
41
  text = html.escape(text)
42
 
43
  # Convert markdown-style formatting to HTML
 
56
  text = re.sub(r'```(.*?)```', r'<pre><code>\1</code></pre>', text, flags=re.DOTALL)
57
  text = re.sub(r'`(.*?)`', r'<code>\1</code>', text)
58
 
59
+ # Lists
60
  lines = text.split('\n')
61
+ in_list = False
62
+ formatted_lines = []
63
+
 
64
  for line in lines:
65
  stripped = line.strip()
66
+ if stripped.startswith('- ') or stripped.startswith('* '):
67
+ if not in_list:
68
+ formatted_lines.append('<ul>')
69
+ in_list = True
70
+ formatted_lines.append(f'<li>{stripped[2:]}</li>')
71
+ elif stripped.startswith(('1. ', '2. ', '3. ', '4. ', '5. ', '6. ', '7. ', '8. ', '9. ')):
72
+ if not in_list:
73
+ formatted_lines.append('<ol>')
74
+ in_list = True
75
+ formatted_lines.append(f'<li>{stripped[3:]}</li>')
 
 
 
 
 
 
 
 
 
 
 
 
76
  else:
77
+ if in_list:
78
+ formatted_lines.append('</ul>' if any('<li>' in line for line in formatted_lines[-5:]) else '</ol>')
79
+ in_list = False
80
  if stripped:
81
+ formatted_lines.append(f'<p>{line}</p>')
82
  else:
83
+ formatted_lines.append('<br>')
84
 
85
+ if in_list:
86
+ formatted_lines.append('</ul>')
87
+
88
+ return '\n'.join(formatted_lines)
 
 
 
89
 
90
  def deep_thinker_analyze(self, problem: str, context: str = "") -> Dict:
91
+ """DeepSeek R1 - The Deep Thinker"""
92
  if not self.is_api_key_set:
93
  return {"error": "API key not set"}
94
 
 
99
 
100
  try:
101
  completion = self.client.chat.completions.create(
102
+ model="deepseek-r1-distill-llama-70b",
103
  messages=[{"role": "user", "content": prompt}],
104
  temperature=0.6,
105
+ max_completion_tokens=8192,
106
  top_p=0.95,
107
+ reasoning_format="raw"
108
  )
109
 
110
  response_content = completion.choices[0].message.content
 
112
  response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
113
 
114
  return {
115
+ "model": "DeepSeek R1 (Deep Thinker)",
116
  "role": "🎭 The Philosopher & Deep Analyzer",
117
  "reasoning": response_content,
118
  "timestamp": datetime.now().strftime("%H:%M:%S"),
 
122
  return {"error": f"Deep Thinker error: {str(e)}"}
123
 
124
  def quick_strategist_analyze(self, problem: str, context: str = "") -> Dict:
125
+ """Qwen3 32B - The Quick Strategist"""
126
  if not self.is_api_key_set:
127
  return {"error": "API key not set"}
128
 
 
139
 
140
  try:
141
  completion = self.client.chat.completions.create(
142
+ model="qwen/qwen3-32b",
143
  messages=[{"role": "user", "content": prompt}],
144
  temperature=0.6,
145
  top_p=0.95,
146
+ max_completion_tokens=8192
147
  )
148
 
149
  response_content = completion.choices[0].message.content
 
151
  response_content = "The model did not generate a response. This could be due to content filtering, model limitations, or API issues."
152
 
153
  return {
154
+ "model": "Qwen3 32B (Quick Strategist)",
155
  "role": "πŸš€ The Strategic Decision Maker",
156
  "reasoning": response_content,
157
  "timestamp": datetime.now().strftime("%H:%M:%S"),
 
161
  return {"error": f"Quick Strategist error: {str(e)}"}
162
 
163
  def detail_detective_analyze(self, problem: str, context: str = "") -> Dict:
164
+ """QwQ 32B - The Detail Detective"""
165
  if not self.is_api_key_set:
166
  return {"error": "API key not set"}
167
 
 
178
  Be extremely thorough and leave no stone unturned. Provide detailed evidence and reasoning for your conclusions."""
179
 
180
  try:
181
+ # Try with different parameters for QwQ model
182
  completion = self.client.chat.completions.create(
183
+ model="qwen-qwq-32b",
184
  messages=[{"role": "user", "content": prompt}],
185
  temperature=0.7,
186
  top_p=0.9,
187
+ max_completion_tokens=8192
188
  )
189
 
190
  response_content = completion.choices[0].message.content
191
  if not response_content or response_content.strip() == "":
192
+ # Fallback: try with a simpler prompt
193
+ fallback_prompt = f"Analyze this problem in detail: {problem}"
194
+ fallback_completion = self.client.chat.completions.create(
195
+ model="qwen-qwq-32b",
196
+ messages=[{"role": "user", "content": fallback_prompt}],
197
+ temperature=0.5,
198
+ max_completion_tokens=8192
199
+ )
200
+ response_content = fallback_completion.choices[0].message.content
201
+
202
+ if not response_content or response_content.strip() == "":
203
+ response_content = "The QwQ model encountered an issue generating content. This could be due to the complexity of the prompt, content filtering, or temporary model availability issues. The model may work better with simpler, more direct questions."
204
+
205
  return {
206
+ "model": "QwQ 32B (Detail Detective)",
207
  "role": "πŸ” The Meticulous Investigator",
208
  "reasoning": response_content,
209
  "timestamp": datetime.now().strftime("%H:%M:%S"),
210
  "tokens_used": getattr(completion.usage, 'total_tokens', 'N/A') if hasattr(completion, 'usage') and completion.usage else "N/A"
211
  }
212
  except Exception as e:
213
+ # If QwQ fails, provide a helpful error message
214
+ error_msg = f"Detail Detective error: {str(e)}"
215
+ if "model" in str(e).lower() or "not found" in str(e).lower():
216
+ error_msg += "\n\nNote: The QwQ model may not be available in your region or may have usage restrictions. You can still use the other models in the orchestra."
217
+ return {"error": error_msg}
218
 
219
  def synthesize_orchestra(self, deep_result: Dict, strategic_result: Dict, detective_result: Dict, original_problem: str) -> str:
220
+ """Synthesize all three perspectives into a final orchestrated solution using Llama 3.3 70B"""
221
  if not self.is_api_key_set:
222
  return "API key not set"
223
 
224
+ # Extract reasoning content safely with better error handling
225
  def extract_reasoning(result: Dict, model_name: str) -> str:
226
  if result.get('error'):
227
  return f"**{model_name} encountered an issue:** {result['error']}"
 
234
  strategic_reasoning = extract_reasoning(strategic_result, "Quick Strategist")
235
  detective_reasoning = extract_reasoning(detective_result, "Detail Detective")
236
 
237
+ synthesis_prompt = f"""You are the Orchestra Conductor using Llama 3.3 70B Versatile model. You have received analytical perspectives from three different AI reasoning specialists on the same problem. Your job is to synthesize these into a comprehensive, unified solution.
238
  ORIGINAL PROBLEM: {original_problem}
239
+ DEEP THINKER ANALYSIS (🎭 DeepSeek R1):
240
  {deep_reasoning}
241
+ STRATEGIC ANALYSIS (πŸš€ Qwen3 32B):
242
  {strategic_reasoning}
243
+ DETECTIVE INVESTIGATION (πŸ” QwQ 32B):
244
  {detective_reasoning}
245
  As the Orchestra Conductor, please create a unified synthesis that:
246
+ 1. Combines the best insights from all available analyses
247
+ 2. Addresses any gaps where models didn't provide input
248
+ 3. Resolves any contradictions between the analyses
249
+ 4. Provides a comprehensive final recommendation
250
+ 5. Highlights where the different reasoning styles complement each other
251
+ 6. Gives a clear, actionable conclusion
252
  If some models didn't provide analysis, work with what's available and note any limitations.
253
  Format your response as a well-structured final solution that leverages all available reasoning approaches. Use clear sections and bullet points where appropriate for maximum clarity."""
254
 
255
  try:
256
  completion = self.client.chat.completions.create(
257
+ model="llama-3.3-70b-versatile",
258
  messages=[{"role": "user", "content": synthesis_prompt}],
259
  temperature=0.7,
260
+ max_completion_tokens=8192,
261
  top_p=0.9
262
  )
263
 
 
292
 
293
  start_time = time.time()
294
 
295
+ if model_choice == "Deep Thinker (DeepSeek R1)":
296
  result = orchestra.deep_thinker_analyze(problem, context)
297
+ elif model_choice == "Quick Strategist (Qwen3 32B)":
298
  result = orchestra.quick_strategist_analyze(problem, context)
299
+ elif model_choice == "Detail Detective (QwQ 32B)":
300
  result = orchestra.detail_detective_analyze(problem, context)
301
  else:
302
  return """<div style="color: red; padding: 20px; border: 2px solid red; border-radius: 10px; background-color: #ffe6e6;">
 
312
  <p>{result['error']}</p>
313
  </div>"""
314
 
315
+ # Format the response as HTML
316
  reasoning_html = orchestra.format_text_to_html(result['reasoning'])
317
 
318
  formatted_output = f"""
 
322
  </div>
323
 
324
  <div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;">
325
+ <div style="display: flex; gap: 20px; font-size: 14px; color: #666;">
326
  <span><strong>Model:</strong> {result['model']}</span>
327
  <span><strong>Analysis Time:</strong> {elapsed_time:.2f} seconds</span>
328
  <span><strong>Timestamp:</strong> {result['timestamp']}</span>
 
330
  </div>
331
  </div>
332
 
333
+ <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6;">
334
  {reasoning_html}
335
  </div>
336
  </div>
 
354
  </div>"""
355
  return error_msg, error_msg, error_msg, error_msg
356
 
357
+ # Phase 1: Deep Thinker
 
358
  deep_result = orchestra.deep_thinker_analyze(problem, context)
359
+
360
+ # Phase 2: Quick Strategist
361
  strategic_result = orchestra.quick_strategist_analyze(problem, context)
362
+
363
+ # Phase 3: Detail Detective
364
  detective_result = orchestra.detail_detective_analyze(problem, context)
365
+
366
+ # Phase 4: Synthesis using Llama 3.3 70B
367
  synthesis = orchestra.synthesize_orchestra(deep_result, strategic_result, detective_result, problem)
368
 
369
  def format_result_html(result: Dict, color: str, icon: str) -> str:
 
384
  </div>
385
 
386
  <div style="background-color: white; padding: 15px; border-radius: 10px; margin-bottom: 20px;">
387
+ <div style="display: flex; gap: 20px; font-size: 14px; color: #666;">
388
  <span><strong>Timestamp:</strong> {result['timestamp']}</span>
389
  <span><strong>Tokens:</strong> {result['tokens_used']}</span>
390
  </div>
391
  </div>
392
 
393
+ <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6;">
394
  {reasoning_html}
395
  </div>
396
  </div>
 
405
  <div style="border: 2px solid #dc3545; border-radius: 15px; padding: 25px; margin: 15px 0; background: linear-gradient(135deg, #fff5f5 0%, #fee);">
406
  <div style="display: flex; align-items: center; margin-bottom: 20px; padding-bottom: 15px; border-bottom: 2px solid #dc3545;">
407
  <span style="font-size: 24px; margin-right: 10px;">🎼</span>
408
+ <h2 style="margin: 0; color: #dc3545;">Orchestra Conductor - Final Synthesis (Llama 3.3 70B Versatile)</h2>
409
  </div>
410
 
411
+ <div style="background-color: white; padding: 20px; border-radius: 10px; line-height: 1.6;">
412
  {synthesis_html}
413
  </div>
414
  </div>
 
418
 
419
  # Custom CSS for better styling
420
  custom_css = """
 
421
  .gradio-container {
422
  max-width: 1400px !important;
423
  margin: 0 auto !important;
 
428
  border-radius: 10px;
429
  margin-bottom: 20px;
430
  }
431
+ .model-section {
432
+ border: 2px solid #e1e5e9;
433
+ border-radius: 10px;
434
+ padding: 15px;
435
+ margin: 10px 0;
436
+ }
437
  .orchestra-header {
438
  text-align: center;
439
  background: linear-gradient(45deg, #f093fb 0%, #f5576c 100%);
440
  padding: 20px;
441
  border-radius: 15px;
442
  margin-bottom: 20px;
 
443
  }
444
+ .status-box {
445
+ background-color: #f8f9fa;
446
+ border-left: 4px solid #28a745;
447
+ padding: 15px;
448
+ margin: 10px 0;
449
+ border-radius: 5px;
450
+ }
451
+ /* Custom styling for HTML outputs */
452
  .html-content {
453
+ max-height: 600px;
454
+ overflow-y: auto;
455
  border: 1px solid #ddd;
456
  border-radius: 8px;
457
+ padding: 10px;
458
+ background-color: #fafafa;
459
  }
 
 
 
 
 
 
460
  """
461
 
462
  # Build the Gradio interface
 
465
  gr.HTML("""
466
  <div class="orchestra-header">
467
  <h1>🎼 The Collaborative Reasoning Orchestra</h1>
468
+ <p><em>Where AI models collaborate like musicians in an orchestra to solve complex problems</em></p>
469
+ <p><strong>Now with Llama 3.3 70B Versatile as Orchestra Conductor & Enhanced HTML-Formatted Responses!</strong></p>
470
  </div>
471
  """)
472
 
473
  # API Key Section
474
+ with gr.Group():
475
+ gr.HTML('<div class="api-key-section"><h3 style="color: white; margin-top: 0;">πŸ”‘ API Configuration</h3></div>')
476
+ with gr.Row():
477
+ api_key_input = gr.Textbox(
478
+ label="Enter your Groq API Key",
479
+ type="password",
480
+ placeholder="gsk_...",
481
+ info="Get your free API key from https://console.groq.com/keys"
482
+ )
483
+ api_status = gr.Textbox(
484
+ label="API Status",
485
+ interactive=False,
486
+ placeholder="Enter API key to validate..."
487
+ )
488
+
489
+ validate_btn = gr.Button("πŸ” Validate API Key", variant="primary")
490
+ validate_btn.click(
491
+ fn=validate_api_key,
492
+ inputs=[api_key_input],
493
+ outputs=[api_status]
494
+ )
495
 
496
  # Main Interface Tabs
497
  with gr.Tabs() as tabs:
498
 
499
  # Single Model Tab
500
  with gr.TabItem("🎯 Single Model Analysis"):
501
+ gr.Markdown("### Test individual reasoning models with beautiful HTML output")
502
 
503
+ with gr.Row():
504
  with gr.Column(scale=1):
505
  single_problem = gr.Textbox(
506
  label="Problem Statement",
507
  placeholder="Enter the problem you want to analyze...",
508
+ lines=4
509
  )
510
  single_context = gr.Textbox(
511
  label="Additional Context (Optional)",
512
  placeholder="Any additional context or constraints...",
513
+ lines=2
514
  )
515
  model_choice = gr.Dropdown(
516
  label="Choose Model",
517
  choices=[
518
+ "Deep Thinker (DeepSeek R1)",
519
+ "Quick Strategist (Qwen3 32B)",
520
+ "Detail Detective (QwQ 32B)"
521
  ],
522
+ value="Deep Thinker (DeepSeek R1)"
523
  )
524
+ single_analyze_btn = gr.Button("πŸš€ Analyze with HTML Output", variant="primary", size="lg")
525
 
526
  with gr.Column(scale=2):
527
  single_output = gr.HTML(label="Analysis Result", elem_classes=["html-content"])
528
+
529
+ single_analyze_btn.click(
530
+ fn=run_single_model,
531
+ inputs=[single_problem, model_choice, single_context],
532
+ outputs=[single_output]
533
+ )
534
 
535
  # Full Orchestra Tab
536
  with gr.TabItem("🎼 Full Orchestra Collaboration"):
537
+ gr.Markdown("### Run all three models collaboratively with Llama 3.3 70B as Orchestra Conductor and stunning HTML-formatted output")
538
 
539
+ with gr.Column():
540
+ with gr.Row():
541
+ with gr.Column(scale=1):
542
+ orchestra_problem = gr.Textbox(
543
+ label="Problem Statement",
544
+ placeholder="Enter a complex problem that benefits from multiple reasoning perspectives...",
545
+ lines=6
546
+ )
547
+ orchestra_context = gr.Textbox(
548
+ label="Additional Context (Optional)",
549
+ placeholder="Background information, constraints, or specific requirements...",
550
+ lines=3
551
+ )
552
+ orchestra_analyze_btn = gr.Button("🎼 Start Orchestra Analysis", variant="primary", size="lg")
553
 
554
+ # Orchestra Results
555
+ with gr.Column():
556
+ deep_output = gr.HTML(label="🎭 Deep Thinker Analysis", elem_classes=["html-content"])
557
+ strategic_output = gr.HTML(label="πŸš€ Quick Strategist Analysis", elem_classes=["html-content"])
558
+ detective_output = gr.HTML(label="πŸ” Detail Detective Analysis", elem_classes=["html-content"])
559
+ synthesis_output = gr.HTML(label="🎼 Final Orchestrated Solution (Llama 3.3 70B)", elem_classes=["html-content"])
560
+
561
+ orchestra_analyze_btn.click(
562
+ fn=run_full_orchestra,
563
+ inputs=[orchestra_problem, orchestra_context],
564
+ outputs=[deep_output, strategic_output, detective_output, synthesis_output]
565
+ )
566
 
567
  # Examples Tab
568
  with gr.TabItem("πŸ’‘ Example Problems"):
 
572
  **🏒 Business Strategy:**
573
  "Our tech startup has limited funding and needs to decide between focusing on product development or marketing. We have a working MVP but low user adoption. Budget is $50K for the next 6 months."
574
 
575
+ **πŸ€– Ethical AI:**
576
+ "Should autonomous vehicles prioritize passenger safety over pedestrian safety in unavoidable accident scenarios? Consider the ethical, legal, and practical implications for mass adoption."
577
 
578
  **🌍 Environmental Policy:**
579
  "Design a policy framework to reduce carbon emissions in urban areas by 40% within 10 years while maintaining economic growth and social equity."
580
 
581
+ **🧬 Scientific Research:**
582
+ "We've discovered a potential breakthrough in gene therapy for treating Alzheimer's, but it requires human trials. How should we proceed given the risks, benefits, regulatory requirements, and ethical considerations?"
583
+
584
  **πŸŽ“ Educational Innovation:**
585
  "How can we redesign traditional university education to better prepare students for the rapidly changing job market of the 2030s, considering AI, remote work, and emerging technologies?"
586
 
587
  **🏠 Urban Planning:**
588
  "A city of 500K people wants to build 10,000 affordable housing units but faces opposition from current residents, environmental concerns, and a $2B budget constraint. Develop a comprehensive solution."
589
+
590
+ **πŸš— Transportation Future:**
591
+ "Design a comprehensive transportation system for a smart city of 1 million people in 2035, integrating autonomous vehicles, public transit, and sustainable mobility."
592
  """)
593
 
594
  # Footer
595
  gr.HTML("""
596
+ <div style="text-align: center; margin-top: 30px; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; color: white;">
597
  <h3>🎼 How the Orchestra Works</h3>
598
+ <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 20px; margin: 20px 0;">
599
+ <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
600
+ <h4>🎭 Deep Thinker (DeepSeek R1)</h4>
601
+ <p>Provides thorough philosophical and theoretical analysis with comprehensive reasoning chains</p>
602
  </div>
603
+ <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
604
+ <h4>πŸš€ Quick Strategist (Qwen3 32B)</h4>
605
+ <p>Delivers practical strategies, action plans, and rapid decision-making frameworks</p>
606
  </div>
607
+ <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
608
+ <h4>πŸ” Detail Detective (QwQ 32B)</h4>
609
+ <p>Conducts comprehensive investigation, fact-checking, and finds hidden connections</p>
610
  </div>
611
+ <div style="background: rgba(255,255,255,0.1); padding: 15px; border-radius: 10px;">
612
  <h4>🎼 Orchestra Conductor</h4>
613
+ <p>Synthesizes all perspectives into unified, comprehensive solutions</p>
614
  </div>
615
  </div>
616
+ <p style="margin-top: 20px;"><em>Built with ❀️ using Groq's lightning-fast inference, Gradio, and beautiful HTML formatting</em></p>
617
  </div>
618
  """)
619
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
620
# Entry point: start the Gradio app when this file is run directly.
if __name__ == "__main__":
    # share=False keeps the server local; switch to share=True for a public Gradio link.
    app.launch(share=False)